2 * Copyright © 2008 Nicolai Haehnle
3 * Copyright © 2008 Dave Airlie
4 * Copyright © 2008 Jérôme Glisse
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the
9 * "Software"), to deal in the Software without restriction, including
10 * without limitation the rights to use, copy, modify, merge, publish,
11 * distribute, sub license, and/or sell copies of the Software, and to
12 * permit persons to whom the Software is furnished to do so, subject to
13 * the following conditions:
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
19 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21 * USE OR OTHER DEALINGS IN THE SOFTWARE.
23 * The above copyright notice and this permission notice (including the
24 * next paragraph) shall be included in all copies or substantial portions
29 * Aapo Tahkola <aet@rasterburn.org>
30 * Nicolai Haehnle <prefect_@gmx.net>
32 * Jérôme Glisse <glisse@freedesktop.org>
42 #include <sys/ioctl.h>
45 #include "main/simple_list.h"
48 #include "radeon_drm.h"
49 #include "radeon_common.h"
50 #include "radeon_bocs_wrapper.h"
52 /* no seriously texmem.c is this screwed up */
53 struct bo_legacy_texture_object
{
54 driTextureObject base
;
55 struct bo_legacy
*parent
;
59 struct radeon_bo base
;
64 int got_dri_texture_obj
;
66 struct bo_legacy_texture_object
*tobj
;
70 struct bo_legacy
*next
, *prev
;
71 struct bo_legacy
*pnext
, *pprev
;
74 struct bo_manager_legacy
{
75 struct radeon_bo_manager base
;
77 unsigned nfree_handles
;
78 unsigned cfree_handles
;
81 struct bo_legacy pending_bos
;
83 uint32_t texture_offset
;
84 unsigned dma_alloc_size
;
85 uint32_t dma_buf_count
;
87 driTextureObject texture_swapped
;
88 driTexHeap
*texture_heap
;
89 struct radeon_screen
*screen
;
90 unsigned *free_handles
;
93 static void bo_legacy_tobj_destroy(void *data
, driTextureObject
*t
)
95 struct bo_legacy_texture_object
*tobj
= (struct bo_legacy_texture_object
*)t
;
98 tobj
->parent
->got_dri_texture_obj
= 0;
99 tobj
->parent
->validated
= 0;
103 static void inline clean_handles(struct bo_manager_legacy
*bom
)
105 while (bom
->cfree_handles
> 0 &&
106 !bom
->free_handles
[bom
->cfree_handles
- 1])
107 bom
->cfree_handles
--;
110 static int legacy_new_handle(struct bo_manager_legacy
*bom
, uint32_t *handle
)
115 if (bom
->nhandle
== 0xFFFFFFFF) {
118 if (bom
->cfree_handles
> 0) {
119 tmp
= bom
->free_handles
[--bom
->cfree_handles
];
122 bom
->cfree_handles
= 0;
123 tmp
= bom
->nhandle
++;
130 static int legacy_free_handle(struct bo_manager_legacy
*bom
, uint32_t handle
)
137 if (handle
== (bom
->nhandle
- 1)) {
141 for (i
= bom
->cfree_handles
- 1; i
>= 0; i
--) {
142 if (bom
->free_handles
[i
] == (bom
->nhandle
- 1)) {
144 bom
->free_handles
[i
] = 0;
150 if (bom
->cfree_handles
< bom
->nfree_handles
) {
151 bom
->free_handles
[bom
->cfree_handles
++] = handle
;
154 bom
->nfree_handles
+= 0x100;
155 handles
= (uint32_t*)realloc(bom
->free_handles
, bom
->nfree_handles
* 4);
156 if (handles
== NULL
) {
157 bom
->nfree_handles
-= 0x100;
160 bom
->free_handles
= handles
;
161 bom
->free_handles
[bom
->cfree_handles
++] = handle
;
165 static void legacy_get_current_age(struct bo_manager_legacy
*boml
)
167 drm_radeon_getparam_t gp
;
170 if (IS_R300_CLASS(boml
->screen
)) {
171 gp
.param
= RADEON_PARAM_LAST_CLEAR
;
172 gp
.value
= (int *)&boml
->current_age
;
173 r
= drmCommandWriteRead(boml
->base
.fd
, DRM_RADEON_GETPARAM
,
176 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
, r
);
180 boml
->current_age
= boml
->screen
->scratch
[3];
183 static int legacy_is_pending(struct radeon_bo
*bo
)
185 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
186 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
188 if (bo_legacy
->is_pending
<= 0) {
189 bo_legacy
->is_pending
= 0;
192 if (boml
->current_age
>= bo_legacy
->pending
) {
193 if (boml
->pending_bos
.pprev
== bo_legacy
) {
194 boml
->pending_bos
.pprev
= bo_legacy
->pprev
;
196 bo_legacy
->pprev
->pnext
= bo_legacy
->pnext
;
197 if (bo_legacy
->pnext
) {
198 bo_legacy
->pnext
->pprev
= bo_legacy
->pprev
;
200 assert(bo_legacy
->is_pending
<= bo
->cref
);
201 while (bo_legacy
->is_pending
--) {
202 bo
= radeon_bo_unref(bo
);
207 bo_legacy
->is_pending
= 0;
214 static int legacy_wait_pending(struct radeon_bo
*bo
)
216 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
217 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
219 if (!bo_legacy
->is_pending
) {
222 /* FIXME: lockup and userspace busy looping that's all the folks */
223 legacy_get_current_age(boml
);
224 while (legacy_is_pending(bo
)) {
226 legacy_get_current_age(boml
);
231 static void legacy_track_pending(struct bo_manager_legacy
*boml
, int debug
)
233 struct bo_legacy
*bo_legacy
;
234 struct bo_legacy
*next
;
236 legacy_get_current_age(boml
);
237 bo_legacy
= boml
->pending_bos
.pnext
;
240 fprintf(stderr
,"pending %p %d %d %d\n", bo_legacy
, bo_legacy
->base
.size
,
241 boml
->current_age
, bo_legacy
->pending
);
242 next
= bo_legacy
->pnext
;
243 if (legacy_is_pending(&(bo_legacy
->base
))) {
249 static int legacy_wait_any_pending(struct bo_manager_legacy
*boml
)
251 struct bo_legacy
*bo_legacy
;
253 legacy_get_current_age(boml
);
254 bo_legacy
= boml
->pending_bos
.pnext
;
257 legacy_wait_pending(&bo_legacy
->base
);
261 static struct bo_legacy
*bo_allocate(struct bo_manager_legacy
*boml
,
267 struct bo_legacy
*bo_legacy
;
271 pgsize
= getpagesize() - 1;
273 size
= (size
+ pgsize
) & ~pgsize
;
275 bo_legacy
= (struct bo_legacy
*)calloc(1, sizeof(struct bo_legacy
));
276 if (bo_legacy
== NULL
) {
279 bo_legacy
->base
.bom
= (struct radeon_bo_manager
*)boml
;
280 bo_legacy
->base
.handle
= 0;
281 bo_legacy
->base
.size
= size
;
282 bo_legacy
->base
.alignment
= alignment
;
283 bo_legacy
->base
.domains
= domains
;
284 bo_legacy
->base
.flags
= flags
;
285 bo_legacy
->base
.ptr
= NULL
;
286 bo_legacy
->map_count
= 0;
287 bo_legacy
->next
= NULL
;
288 bo_legacy
->prev
= NULL
;
289 bo_legacy
->got_dri_texture_obj
= 0;
290 bo_legacy
->pnext
= NULL
;
291 bo_legacy
->pprev
= NULL
;
292 bo_legacy
->next
= boml
->bos
.next
;
293 bo_legacy
->prev
= &boml
->bos
;
294 boml
->bos
.next
= bo_legacy
;
295 if (bo_legacy
->next
) {
296 bo_legacy
->next
->prev
= bo_legacy
;
301 static int bo_dma_alloc(struct radeon_bo
*bo
)
303 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
304 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
305 drm_radeon_mem_alloc_t alloc
;
310 /* align size on 4Kb */
311 size
= (((4 * 1024) - 1) + bo
->size
) & ~((4 * 1024) - 1);
312 alloc
.region
= RADEON_MEM_REGION_GART
;
313 alloc
.alignment
= bo_legacy
->base
.alignment
;
315 alloc
.region_offset
= &base_offset
;
316 r
= drmCommandWriteRead(bo
->bom
->fd
,
321 /* ptr is set to NULL if dma allocation failed */
322 bo_legacy
->ptr
= NULL
;
325 bo_legacy
->ptr
= boml
->screen
->gartTextures
.map
+ base_offset
;
326 bo_legacy
->offset
= boml
->screen
->gart_texture_offset
+ base_offset
;
328 boml
->dma_alloc_size
+= size
;
329 boml
->dma_buf_count
++;
333 static int bo_dma_free(struct radeon_bo
*bo
)
335 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
336 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
337 drm_radeon_mem_free_t memfree
;
340 if (bo_legacy
->ptr
== NULL
) {
341 /* ptr is set to NULL if dma allocation failed */
344 legacy_get_current_age(boml
);
345 memfree
.region
= RADEON_MEM_REGION_GART
;
346 memfree
.region_offset
= bo_legacy
->offset
;
347 memfree
.region_offset
-= boml
->screen
->gart_texture_offset
;
348 r
= drmCommandWrite(boml
->base
.fd
,
353 fprintf(stderr
, "Failed to free bo[%p] at %08x\n",
354 &bo_legacy
->base
, memfree
.region_offset
);
355 fprintf(stderr
, "ret = %s\n", strerror(-r
));
358 boml
->dma_alloc_size
-= bo_legacy
->base
.size
;
359 boml
->dma_buf_count
--;
363 static void bo_free(struct bo_legacy
*bo_legacy
)
365 struct bo_manager_legacy
*boml
;
367 if (bo_legacy
== NULL
) {
370 boml
= (struct bo_manager_legacy
*)bo_legacy
->base
.bom
;
371 bo_legacy
->prev
->next
= bo_legacy
->next
;
372 if (bo_legacy
->next
) {
373 bo_legacy
->next
->prev
= bo_legacy
->prev
;
375 if (!bo_legacy
->static_bo
) {
376 legacy_free_handle(boml
, bo_legacy
->base
.handle
);
377 if (bo_legacy
->base
.domains
& RADEON_GEM_DOMAIN_GTT
) {
379 bo_dma_free(&bo_legacy
->base
);
381 driDestroyTextureObject(&bo_legacy
->tobj
->base
);
382 bo_legacy
->tobj
= NULL
;
383 /* free backing store */
384 free(bo_legacy
->ptr
);
387 memset(bo_legacy
, 0 , sizeof(struct bo_legacy
));
391 static struct radeon_bo
*bo_open(struct radeon_bo_manager
*bom
,
398 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bom
;
399 struct bo_legacy
*bo_legacy
;
403 bo_legacy
= boml
->bos
.next
;
405 if (bo_legacy
->base
.handle
== handle
) {
406 radeon_bo_ref(&(bo_legacy
->base
));
407 return (struct radeon_bo
*)bo_legacy
;
409 bo_legacy
= bo_legacy
->next
;
414 bo_legacy
= bo_allocate(boml
, size
, alignment
, domains
, flags
);
415 bo_legacy
->static_bo
= 0;
416 r
= legacy_new_handle(boml
, &bo_legacy
->base
.handle
);
421 if (bo_legacy
->base
.domains
& RADEON_GEM_DOMAIN_GTT
) {
423 legacy_track_pending(boml
, 0);
426 r
= bo_dma_alloc(&(bo_legacy
->base
));
428 if (legacy_wait_any_pending(boml
) == -1) {
436 bo_legacy
->ptr
= malloc(bo_legacy
->base
.size
);
437 if (bo_legacy
->ptr
== NULL
) {
442 radeon_bo_ref(&(bo_legacy
->base
));
443 return (struct radeon_bo
*)bo_legacy
;
/* radeon_bo_funcs.bo_ref hook: reference counting is handled by the common
 * layer, so the legacy manager has nothing extra to do here.
 */
static void bo_ref(struct radeon_bo *bo)
{
}
450 static struct radeon_bo
*bo_unref(struct radeon_bo
*bo
)
452 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
455 bo_legacy
->prev
->next
= bo_legacy
->next
;
456 if (bo_legacy
->next
) {
457 bo_legacy
->next
->prev
= bo_legacy
->prev
;
459 if (!bo_legacy
->is_pending
) {
467 static int bo_map(struct radeon_bo
*bo
, int write
)
469 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
470 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
472 legacy_wait_pending(bo
);
473 bo_legacy
->validated
= 0;
474 bo_legacy
->dirty
= 1;
475 bo_legacy
->map_count
++;
476 bo
->ptr
= bo_legacy
->ptr
;
477 /* Read the first pixel in the frame buffer. This should
478 * be a noop, right? In fact without this conform fails as reading
479 * from the framebuffer sometimes produces old results -- the
480 * on-card read cache gets mixed up and doesn't notice that the
481 * framebuffer has been updated.
483 * Note that we should probably be reading some otherwise unused
484 * region of VRAM, otherwise we might get incorrect results when
485 * reading pixels from the top left of the screen.
487 * I found this problem on an R420 with glean's texCube test.
488 * Note that the R200 span code also *writes* the first pixel in the
489 * framebuffer, but I've found this to be unnecessary.
490 * -- Nicolai Hähnle, June 2008
492 if (!(bo
->domains
& RADEON_GEM_DOMAIN_GTT
)) {
494 volatile int *buf
= (int*)boml
->screen
->driScreen
->pFB
;
500 static int bo_unmap(struct radeon_bo
*bo
)
502 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
504 if (--bo_legacy
->map_count
> 0) {
511 static struct radeon_bo_funcs bo_legacy_funcs
= {
519 static int bo_vram_validate(struct radeon_bo
*bo
,
523 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
524 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
527 if (!bo_legacy
->got_dri_texture_obj
) {
528 bo_legacy
->tobj
= CALLOC(sizeof(struct bo_legacy_texture_object
));
529 bo_legacy
->tobj
->parent
= bo_legacy
;
530 make_empty_list(&bo_legacy
->tobj
->base
);
531 bo_legacy
->tobj
->base
.totalSize
= bo
->size
;
532 r
= driAllocateTexture(&boml
->texture_heap
, 1,
533 &bo_legacy
->tobj
->base
);
535 uint8_t *segfault
=NULL
;
536 fprintf(stderr
, "Ouch! vram_validate failed %d\n", r
);
540 bo_legacy
->offset
= boml
->texture_offset
+
541 bo_legacy
->tobj
->base
.memBlock
->ofs
;
542 bo_legacy
->got_dri_texture_obj
= 1;
543 bo_legacy
->dirty
= 1;
546 if (bo_legacy
->got_dri_texture_obj
)
547 driUpdateTextureLRU(&bo_legacy
->tobj
->base
);
549 if (bo_legacy
->dirty
|| bo_legacy
->tobj
->base
.dirty_images
[0]) {
550 /* Copy to VRAM using a blit.
551 * All memory is 4K aligned. We're using 1024 pixels wide blits.
553 drm_radeon_texture_t tex
;
554 drm_radeon_tex_image_t tmp
;
557 tex
.offset
= bo_legacy
->offset
;
559 assert(!(tex
.offset
& 1023));
563 if (bo
->size
< 4096) {
564 tmp
.width
= (bo
->size
+ 3) / 4;
568 tmp
.height
= (bo
->size
+ 4095) / 4096;
570 tmp
.data
= bo_legacy
->ptr
;
571 tex
.format
= RADEON_TXFORMAT_ARGB8888
;
572 tex
.width
= tmp
.width
;
573 tex
.height
= tmp
.height
;
574 tex
.pitch
= MAX2(tmp
.width
/ 16, 1);
576 ret
= drmCommandWriteRead(bo
->bom
->fd
,
579 sizeof(drm_radeon_texture_t
));
581 if (RADEON_DEBUG
& DEBUG_IOCTL
)
582 fprintf(stderr
, "DRM_RADEON_TEXTURE: again!\n");
585 } while (ret
== -EAGAIN
);
586 bo_legacy
->dirty
= 0;
587 bo_legacy
->tobj
->base
.dirty_images
[0] = 0;
592 int radeon_bo_legacy_validate(struct radeon_bo
*bo
,
596 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
599 if (bo_legacy
->map_count
) {
600 fprintf(stderr
, "bo(%p, %d) is mapped (%d) can't valide it.\n",
601 bo
, bo
->size
, bo_legacy
->map_count
);
604 if (bo_legacy
->static_bo
|| bo_legacy
->validated
) {
605 *soffset
= bo_legacy
->offset
;
606 *eoffset
= bo_legacy
->offset
+ bo
->size
;
609 if (!(bo
->domains
& RADEON_GEM_DOMAIN_GTT
)) {
610 r
= bo_vram_validate(bo
, soffset
, eoffset
);
615 *soffset
= bo_legacy
->offset
;
616 *eoffset
= bo_legacy
->offset
+ bo
->size
;
617 bo_legacy
->validated
= 1;
621 void radeon_bo_legacy_pending(struct radeon_bo
*bo
, uint32_t pending
)
623 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
624 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
626 bo_legacy
->pending
= pending
;
627 bo_legacy
->is_pending
++;
628 /* add to pending list */
630 if (bo_legacy
->is_pending
> 1) {
633 bo_legacy
->pprev
= boml
->pending_bos
.pprev
;
634 bo_legacy
->pnext
= NULL
;
635 bo_legacy
->pprev
->pnext
= bo_legacy
;
636 boml
->pending_bos
.pprev
= bo_legacy
;
640 void radeon_bo_manager_legacy_dtor(struct radeon_bo_manager
*bom
)
642 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bom
;
643 struct bo_legacy
*bo_legacy
;
648 bo_legacy
= boml
->bos
.next
;
650 struct bo_legacy
*next
;
652 next
= bo_legacy
->next
;
656 driDestroyTextureHeap(boml
->texture_heap
);
657 free(boml
->free_handles
);
661 static struct bo_legacy
*radeon_legacy_bo_alloc_static(struct bo_manager_legacy
*bom
,
662 int size
, uint32_t offset
)
664 struct bo_legacy
*bo
;
666 bo
= bo_allocate(bom
, size
, 0, RADEON_GEM_DOMAIN_VRAM
, 0);
670 bo
->offset
= offset
+ bom
->fb_location
;
671 bo
->base
.handle
= bo
->offset
;
672 bo
->ptr
= bom
->screen
->driScreen
->pFB
+ offset
;
673 if (bo
->base
.handle
> bom
->nhandle
) {
674 bom
->nhandle
= bo
->base
.handle
+ 1;
676 radeon_bo_ref(&(bo
->base
));
680 struct radeon_bo_manager
*radeon_bo_manager_legacy_ctor(struct radeon_screen
*scrn
)
682 struct bo_manager_legacy
*bom
;
683 struct bo_legacy
*bo
;
686 bom
= (struct bo_manager_legacy
*)
687 calloc(1, sizeof(struct bo_manager_legacy
));
692 make_empty_list(&bom
->texture_swapped
);
694 bom
->texture_heap
= driCreateTextureHeap(0,
698 RADEON_NR_TEX_REGIONS
,
699 (drmTextureRegionPtr
)scrn
->sarea
->tex_list
[0],
700 &scrn
->sarea
->tex_age
[0],
701 &bom
->texture_swapped
,
702 sizeof(struct bo_legacy_texture_object
),
703 &bo_legacy_tobj_destroy
);
704 bom
->texture_offset
= scrn
->texOffset
[0];
706 bom
->base
.funcs
= &bo_legacy_funcs
;
707 bom
->base
.fd
= scrn
->driScreen
->fd
;
708 bom
->bos
.next
= NULL
;
709 bom
->bos
.prev
= NULL
;
710 bom
->pending_bos
.pprev
= &bom
->pending_bos
;
711 bom
->pending_bos
.pnext
= NULL
;
713 bom
->fb_location
= scrn
->fbLocation
;
715 bom
->cfree_handles
= 0;
716 bom
->nfree_handles
= 0x400;
717 bom
->free_handles
= (uint32_t*)malloc(bom
->nfree_handles
* 4);
718 if (bom
->free_handles
== NULL
) {
719 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager
*)bom
);
723 /* biggest framebuffer size */
727 bo
= radeon_legacy_bo_alloc_static(bom
, size
, bom
->screen
->frontOffset
);
729 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager
*)bom
);
732 if (scrn
->sarea
->tiling_enabled
) {
733 bo
->base
.flags
= RADEON_BO_FLAGS_MACRO_TILE
;
737 bo
= radeon_legacy_bo_alloc_static(bom
, size
, bom
->screen
->backOffset
);
739 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager
*)bom
);
742 if (scrn
->sarea
->tiling_enabled
) {
743 bo
->base
.flags
= RADEON_BO_FLAGS_MACRO_TILE
;
747 bo
= radeon_legacy_bo_alloc_static(bom
, size
, bom
->screen
->depthOffset
);
749 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager
*)bom
);
753 if (scrn
->sarea
->tiling_enabled
) {
754 bo
->base
.flags
|= RADEON_BO_FLAGS_MACRO_TILE
;
755 bo
->base
.flags
|= RADEON_BO_FLAGS_MICRO_TILE
;
757 return (struct radeon_bo_manager
*)bom
;
760 void radeon_bo_legacy_texture_age(struct radeon_bo_manager
*bom
)
762 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bom
;
763 DRI_AGE_TEXTURES(boml
->texture_heap
);
766 unsigned radeon_bo_legacy_relocs_size(struct radeon_bo
*bo
)
768 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
770 if (bo_legacy
->static_bo
|| (bo
->domains
& RADEON_GEM_DOMAIN_GTT
)) {
776 int radeon_legacy_bo_is_static(struct radeon_bo
*bo
)
778 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
779 return bo_legacy
->static_bo
;