/*
 * Copyright © 2008 Nicolai Haehnle
 * Copyright © 2008 Dave Airlie
 * Copyright © 2008 Jérôme Glisse
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * Authors:
 *      Aapo Tahkola <aet@rasterburn.org>
 *      Nicolai Haehnle <prefect_@gmx.net>
 *      Dave Airlie
 *      Jérôme Glisse <glisse@freedesktop.org>
 */
#include <assert.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>

#include "main/simple_list.h"

#include "radeon_drm.h"
#include "radeon_common.h"
#include "radeon_bocs_wrapper.h"
54 struct radeon_bo base
;
60 int got_dri_texture_obj
;
63 driTextureObject dri_texture_obj
;
65 struct bo_legacy
*next
, *prev
;
66 struct bo_legacy
*pnext
, *pprev
;
69 struct bo_manager_legacy
{
70 struct radeon_bo_manager base
;
72 unsigned nfree_handles
;
73 unsigned cfree_handles
;
76 struct bo_legacy pending_bos
;
78 uint32_t texture_offset
;
79 unsigned dma_alloc_size
;
80 uint32_t dma_buf_count
;
82 driTextureObject texture_swapped
;
83 driTexHeap
*texture_heap
;
84 struct radeon_screen
*screen
;
85 unsigned *free_handles
;
88 #define container_of(ptr, type, member) ({ \
89 const __typeof( ((type *)0)->member ) *__mptr = (ptr); \
90 (type *)( (char *)__mptr - offsetof(type,member) );})
92 static void bo_legacy_tobj_destroy(void *data
, driTextureObject
*t
)
94 struct bo_legacy
*bo_legacy
= container_of(t
, struct bo_legacy
, dri_texture_obj
);
97 bo_legacy
->got_dri_texture_obj
= 0;
98 bo_legacy
->validated
= 0;
101 static void inline clean_handles(struct bo_manager_legacy
*bom
)
103 while (bom
->cfree_handles
> 0 &&
104 !bom
->free_handles
[bom
->cfree_handles
- 1])
105 bom
->cfree_handles
--;
108 static int legacy_new_handle(struct bo_manager_legacy
*bom
, uint32_t *handle
)
113 if (bom
->nhandle
== 0xFFFFFFFF) {
116 if (bom
->cfree_handles
> 0) {
117 tmp
= bom
->free_handles
[--bom
->cfree_handles
];
120 bom
->cfree_handles
= 0;
121 tmp
= bom
->nhandle
++;
128 static int legacy_free_handle(struct bo_manager_legacy
*bom
, uint32_t handle
)
135 if (handle
== (bom
->nhandle
- 1)) {
139 for (i
= bom
->cfree_handles
- 1; i
>= 0; i
--) {
140 if (bom
->free_handles
[i
] == (bom
->nhandle
- 1)) {
142 bom
->free_handles
[i
] = 0;
148 if (bom
->cfree_handles
< bom
->nfree_handles
) {
149 bom
->free_handles
[bom
->cfree_handles
++] = handle
;
152 bom
->nfree_handles
+= 0x100;
153 handles
= (uint32_t*)realloc(bom
->free_handles
, bom
->nfree_handles
* 4);
154 if (handles
== NULL
) {
155 bom
->nfree_handles
-= 0x100;
158 bom
->free_handles
= handles
;
159 bom
->free_handles
[bom
->cfree_handles
++] = handle
;
163 static void legacy_get_current_age(struct bo_manager_legacy
*boml
)
165 drm_radeon_getparam_t gp
;
168 if (IS_R300_CLASS(boml
->screen
)) {
169 gp
.param
= RADEON_PARAM_LAST_CLEAR
;
170 gp
.value
= (int *)&boml
->current_age
;
171 r
= drmCommandWriteRead(boml
->base
.fd
, DRM_RADEON_GETPARAM
,
174 fprintf(stderr
, "%s: drmRadeonGetParam: %d\n", __FUNCTION__
, r
);
178 boml
->current_age
= boml
->screen
->scratch
[3];
181 static int legacy_is_pending(struct radeon_bo
*bo
)
183 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
184 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
186 if (bo_legacy
->is_pending
<= 0) {
187 bo_legacy
->is_pending
= 0;
190 if (boml
->current_age
>= bo_legacy
->pending
) {
191 if (boml
->pending_bos
.pprev
== bo_legacy
) {
192 boml
->pending_bos
.pprev
= bo_legacy
->pprev
;
194 bo_legacy
->pprev
->pnext
= bo_legacy
->pnext
;
195 if (bo_legacy
->pnext
) {
196 bo_legacy
->pnext
->pprev
= bo_legacy
->pprev
;
198 assert(bo_legacy
->is_pending
<= bo
->cref
);
199 while (bo_legacy
->is_pending
--) {
200 bo
= radeon_bo_unref(bo
);
205 bo_legacy
->is_pending
= 0;
212 static int legacy_wait_pending(struct radeon_bo
*bo
)
214 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
215 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
217 if (!bo_legacy
->is_pending
) {
220 /* FIXME: lockup and userspace busy looping that's all the folks */
221 legacy_get_current_age(boml
);
222 while (legacy_is_pending(bo
)) {
224 legacy_get_current_age(boml
);
229 static void legacy_track_pending(struct bo_manager_legacy
*boml
, int debug
)
231 struct bo_legacy
*bo_legacy
;
232 struct bo_legacy
*next
;
234 legacy_get_current_age(boml
);
235 bo_legacy
= boml
->pending_bos
.pnext
;
238 fprintf(stderr
,"pending %p %d %d %d\n", bo_legacy
, bo_legacy
->base
.size
,
239 boml
->current_age
, bo_legacy
->pending
);
240 next
= bo_legacy
->pnext
;
241 if (legacy_is_pending(&(bo_legacy
->base
))) {
247 static int legacy_wait_any_pending(struct bo_manager_legacy
*boml
)
249 struct bo_legacy
*bo_legacy
;
250 struct bo_legacy
*next
;
252 legacy_get_current_age(boml
);
253 bo_legacy
= boml
->pending_bos
.pnext
;
256 legacy_wait_pending(&bo_legacy
->base
);
260 static struct bo_legacy
*bo_allocate(struct bo_manager_legacy
*boml
,
266 struct bo_legacy
*bo_legacy
;
270 pgsize
= getpagesize() - 1;
272 size
= (size
+ pgsize
) & ~pgsize
;
274 bo_legacy
= (struct bo_legacy
*)calloc(1, sizeof(struct bo_legacy
));
275 if (bo_legacy
== NULL
) {
278 bo_legacy
->base
.bom
= (struct radeon_bo_manager
*)boml
;
279 bo_legacy
->base
.handle
= 0;
280 bo_legacy
->base
.size
= size
;
281 bo_legacy
->base
.alignment
= alignment
;
282 bo_legacy
->base
.domains
= domains
;
283 bo_legacy
->base
.flags
= flags
;
284 bo_legacy
->base
.ptr
= NULL
;
285 bo_legacy
->map_count
= 0;
286 bo_legacy
->next
= NULL
;
287 bo_legacy
->prev
= NULL
;
288 bo_legacy
->got_dri_texture_obj
= 0;
289 bo_legacy
->pnext
= NULL
;
290 bo_legacy
->pprev
= NULL
;
291 bo_legacy
->next
= boml
->bos
.next
;
292 bo_legacy
->prev
= &boml
->bos
;
293 boml
->bos
.next
= bo_legacy
;
294 if (bo_legacy
->next
) {
295 bo_legacy
->next
->prev
= bo_legacy
;
300 static int bo_dma_alloc(struct radeon_bo
*bo
)
302 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
303 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
304 drm_radeon_mem_alloc_t alloc
;
309 /* align size on 4Kb */
310 size
= (((4 * 1024) - 1) + bo
->size
) & ~((4 * 1024) - 1);
311 alloc
.region
= RADEON_MEM_REGION_GART
;
312 alloc
.alignment
= bo_legacy
->base
.alignment
;
314 alloc
.region_offset
= &base_offset
;
315 r
= drmCommandWriteRead(bo
->bom
->fd
,
320 /* ptr is set to NULL if dma allocation failed */
321 bo_legacy
->ptr
= NULL
;
324 bo_legacy
->ptr
= boml
->screen
->gartTextures
.map
+ base_offset
;
325 bo_legacy
->offset
= boml
->screen
->gart_texture_offset
+ base_offset
;
327 boml
->dma_alloc_size
+= size
;
328 boml
->dma_buf_count
++;
332 static int bo_dma_free(struct radeon_bo
*bo
)
334 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
335 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
336 drm_radeon_mem_free_t memfree
;
339 if (bo_legacy
->ptr
== NULL
) {
340 /* ptr is set to NULL if dma allocation failed */
343 legacy_get_current_age(boml
);
344 memfree
.region
= RADEON_MEM_REGION_GART
;
345 memfree
.region_offset
= bo_legacy
->offset
;
346 memfree
.region_offset
-= boml
->screen
->gart_texture_offset
;
347 r
= drmCommandWrite(boml
->base
.fd
,
352 fprintf(stderr
, "Failed to free bo[%p] at %08x\n",
353 &bo_legacy
->base
, memfree
.region_offset
);
354 fprintf(stderr
, "ret = %s\n", strerror(-r
));
357 boml
->dma_alloc_size
-= bo_legacy
->base
.size
;
358 boml
->dma_buf_count
--;
362 static void bo_free(struct bo_legacy
*bo_legacy
)
364 struct bo_manager_legacy
*boml
;
366 if (bo_legacy
== NULL
) {
369 boml
= (struct bo_manager_legacy
*)bo_legacy
->base
.bom
;
370 bo_legacy
->prev
->next
= bo_legacy
->next
;
371 if (bo_legacy
->next
) {
372 bo_legacy
->next
->prev
= bo_legacy
->prev
;
374 if (!bo_legacy
->static_bo
) {
375 legacy_free_handle(boml
, bo_legacy
->base
.handle
);
376 if (bo_legacy
->base
.domains
& RADEON_GEM_DOMAIN_GTT
) {
378 bo_dma_free(&bo_legacy
->base
);
380 if (bo_legacy
->got_dri_texture_obj
)
381 driCleanupTextureObject(&bo_legacy
->dri_texture_obj
);
383 /* free backing store */
384 free(bo_legacy
->ptr
);
390 static struct radeon_bo
*bo_open(struct radeon_bo_manager
*bom
,
397 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bom
;
398 struct bo_legacy
*bo_legacy
;
402 bo_legacy
= boml
->bos
.next
;
404 if (bo_legacy
->base
.handle
== handle
) {
405 radeon_bo_ref(&(bo_legacy
->base
));
406 return (struct radeon_bo
*)bo_legacy
;
408 bo_legacy
= bo_legacy
->next
;
413 bo_legacy
= bo_allocate(boml
, size
, alignment
, domains
, flags
);
414 bo_legacy
->static_bo
= 0;
415 r
= legacy_new_handle(boml
, &bo_legacy
->base
.handle
);
420 if (bo_legacy
->base
.domains
& RADEON_GEM_DOMAIN_GTT
) {
422 legacy_track_pending(boml
, 0);
425 r
= bo_dma_alloc(&(bo_legacy
->base
));
427 if (legacy_wait_any_pending(boml
) == -1) {
435 bo_legacy
->ptr
= malloc(bo_legacy
->base
.size
);
436 if (bo_legacy
->ptr
== NULL
) {
441 radeon_bo_ref(&(bo_legacy
->base
));
442 return (struct radeon_bo
*)bo_legacy
;
445 static void bo_ref(struct radeon_bo
*bo
)
449 static struct radeon_bo
*bo_unref(struct radeon_bo
*bo
)
451 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
454 bo_legacy
->prev
->next
= bo_legacy
->next
;
455 if (bo_legacy
->next
) {
456 bo_legacy
->next
->prev
= bo_legacy
->prev
;
458 if (!bo_legacy
->is_pending
) {
466 static int bo_map(struct radeon_bo
*bo
, int write
)
468 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
469 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
471 legacy_wait_pending(bo
);
472 bo_legacy
->validated
= 0;
473 bo_legacy
->dirty
= 1;
474 bo_legacy
->map_count
++;
475 bo
->ptr
= bo_legacy
->ptr
;
476 /* Read the first pixel in the frame buffer. This should
477 * be a noop, right? In fact without this conform fails as reading
478 * from the framebuffer sometimes produces old results -- the
479 * on-card read cache gets mixed up and doesn't notice that the
480 * framebuffer has been updated.
482 * Note that we should probably be reading some otherwise unused
483 * region of VRAM, otherwise we might get incorrect results when
484 * reading pixels from the top left of the screen.
486 * I found this problem on an R420 with glean's texCube test.
487 * Note that the R200 span code also *writes* the first pixel in the
488 * framebuffer, but I've found this to be unnecessary.
489 * -- Nicolai Hähnle, June 2008
491 if (!(bo
->domains
& RADEON_GEM_DOMAIN_GTT
)) {
493 volatile int *buf
= (int*)boml
->screen
->driScreen
->pFB
;
499 static int bo_unmap(struct radeon_bo
*bo
)
501 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
503 if (--bo_legacy
->map_count
> 0) {
510 static struct radeon_bo_funcs bo_legacy_funcs
= {
518 static int bo_vram_validate(struct radeon_bo
*bo
,
522 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
523 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
526 if (!bo_legacy
->got_dri_texture_obj
) {
527 make_empty_list(&bo_legacy
->dri_texture_obj
);
528 bo_legacy
->dri_texture_obj
.totalSize
= bo
->size
;
529 r
= driAllocateTexture(&boml
->texture_heap
, 1,
530 &bo_legacy
->dri_texture_obj
);
532 uint8_t *segfault
=NULL
;
533 fprintf(stderr
, "Ouch! vram_validate failed %d\n", r
);
537 bo_legacy
->offset
= boml
->texture_offset
+
538 bo_legacy
->dri_texture_obj
.memBlock
->ofs
;
539 bo_legacy
->got_dri_texture_obj
= 1;
540 bo_legacy
->dirty
= 1;
543 if (bo_legacy
->got_dri_texture_obj
)
544 driUpdateTextureLRU(&bo_legacy
->dri_texture_obj
);
545 if (bo_legacy
->dirty
) {
546 /* Copy to VRAM using a blit.
547 * All memory is 4K aligned. We're using 1024 pixels wide blits.
549 drm_radeon_texture_t tex
;
550 drm_radeon_tex_image_t tmp
;
553 tex
.offset
= bo_legacy
->offset
;
555 assert(!(tex
.offset
& 1023));
559 if (bo
->size
< 4096) {
560 tmp
.width
= (bo
->size
+ 3) / 4;
564 tmp
.height
= (bo
->size
+ 4095) / 4096;
566 tmp
.data
= bo_legacy
->ptr
;
567 tex
.format
= RADEON_TXFORMAT_ARGB8888
;
568 tex
.width
= tmp
.width
;
569 tex
.height
= tmp
.height
;
570 tex
.pitch
= MAX2(tmp
.width
/ 16, 1);
572 ret
= drmCommandWriteRead(bo
->bom
->fd
,
575 sizeof(drm_radeon_texture_t
));
577 if (RADEON_DEBUG
& DEBUG_IOCTL
)
578 fprintf(stderr
, "DRM_RADEON_TEXTURE: again!\n");
581 } while (ret
== -EAGAIN
);
582 bo_legacy
->dirty
= 0;
587 int radeon_bo_legacy_validate(struct radeon_bo
*bo
,
591 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
594 if (bo_legacy
->map_count
) {
595 fprintf(stderr
, "bo(%p, %d) is mapped (%d) can't valide it.\n",
596 bo
, bo
->size
, bo_legacy
->map_count
);
599 if (bo_legacy
->static_bo
|| bo_legacy
->validated
) {
600 *soffset
= bo_legacy
->offset
;
601 *eoffset
= bo_legacy
->offset
+ bo
->size
;
604 if (!(bo
->domains
& RADEON_GEM_DOMAIN_GTT
)) {
605 r
= bo_vram_validate(bo
, soffset
, eoffset
);
610 *soffset
= bo_legacy
->offset
;
611 *eoffset
= bo_legacy
->offset
+ bo
->size
;
612 bo_legacy
->validated
= 1;
616 void radeon_bo_legacy_pending(struct radeon_bo
*bo
, uint32_t pending
)
618 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bo
->bom
;
619 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
621 bo_legacy
->pending
= pending
;
622 bo_legacy
->is_pending
++;
623 /* add to pending list */
625 if (bo_legacy
->is_pending
> 1) {
628 bo_legacy
->pprev
= boml
->pending_bos
.pprev
;
629 bo_legacy
->pnext
= NULL
;
630 bo_legacy
->pprev
->pnext
= bo_legacy
;
631 boml
->pending_bos
.pprev
= bo_legacy
;
635 void radeon_bo_manager_legacy_dtor(struct radeon_bo_manager
*bom
)
637 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bom
;
638 struct bo_legacy
*bo_legacy
;
643 bo_legacy
= boml
->bos
.next
;
645 struct bo_legacy
*next
;
647 next
= bo_legacy
->next
;
651 free(boml
->free_handles
);
655 static struct bo_legacy
*radeon_legacy_bo_alloc_static(struct bo_manager_legacy
*bom
,
656 int size
, uint32_t offset
)
658 struct bo_legacy
*bo
;
660 bo
= bo_allocate(bom
, size
, 0, RADEON_GEM_DOMAIN_VRAM
, 0);
664 bo
->offset
= offset
+ bom
->fb_location
;
665 bo
->base
.handle
= bo
->offset
;
666 bo
->ptr
= bom
->screen
->driScreen
->pFB
+ offset
;
667 if (bo
->base
.handle
> bom
->nhandle
) {
668 bom
->nhandle
= bo
->base
.handle
+ 1;
670 radeon_bo_ref(&(bo
->base
));
674 struct radeon_bo_manager
*radeon_bo_manager_legacy_ctor(struct radeon_screen
*scrn
)
676 struct bo_manager_legacy
*bom
;
677 struct bo_legacy
*bo
;
680 bom
= (struct bo_manager_legacy
*)
681 calloc(1, sizeof(struct bo_manager_legacy
));
686 bom
->texture_heap
= driCreateTextureHeap(0,
690 RADEON_NR_TEX_REGIONS
,
691 (drmTextureRegionPtr
)scrn
->sarea
->tex_list
[0],
692 &scrn
->sarea
->tex_age
[0],
693 &bom
->texture_swapped
,
694 sizeof(struct bo_legacy
),
695 &bo_legacy_tobj_destroy
);
696 bom
->texture_offset
= scrn
->texOffset
[0];
698 bom
->base
.funcs
= &bo_legacy_funcs
;
699 bom
->base
.fd
= scrn
->driScreen
->fd
;
700 bom
->bos
.next
= NULL
;
701 bom
->bos
.prev
= NULL
;
702 bom
->pending_bos
.pprev
= &bom
->pending_bos
;
703 bom
->pending_bos
.pnext
= NULL
;
705 bom
->fb_location
= scrn
->fbLocation
;
707 bom
->cfree_handles
= 0;
708 bom
->nfree_handles
= 0x400;
709 bom
->free_handles
= (uint32_t*)malloc(bom
->nfree_handles
* 4);
710 if (bom
->free_handles
== NULL
) {
711 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager
*)bom
);
715 /* biggest framebuffer size */
719 bo
= radeon_legacy_bo_alloc_static(bom
, size
, bom
->screen
->frontOffset
);
721 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager
*)bom
);
724 if (scrn
->sarea
->tiling_enabled
) {
725 bo
->base
.flags
= RADEON_BO_FLAGS_MACRO_TILE
;
729 bo
= radeon_legacy_bo_alloc_static(bom
, size
, bom
->screen
->backOffset
);
731 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager
*)bom
);
734 if (scrn
->sarea
->tiling_enabled
) {
735 bo
->base
.flags
= RADEON_BO_FLAGS_MACRO_TILE
;
739 bo
= radeon_legacy_bo_alloc_static(bom
, size
, bom
->screen
->depthOffset
);
741 radeon_bo_manager_legacy_dtor((struct radeon_bo_manager
*)bom
);
745 if (scrn
->sarea
->tiling_enabled
) {
746 bo
->base
.flags
|= RADEON_BO_FLAGS_MACRO_TILE
;
747 bo
->base
.flags
|= RADEON_BO_FLAGS_MICRO_TILE
;
749 return (struct radeon_bo_manager
*)bom
;
752 void radeon_bo_legacy_texture_age(struct radeon_bo_manager
*bom
)
754 struct bo_manager_legacy
*boml
= (struct bo_manager_legacy
*)bom
;
755 DRI_AGE_TEXTURES(boml
->texture_heap
);
758 unsigned radeon_bo_legacy_relocs_size(struct radeon_bo
*bo
)
760 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
762 if (bo_legacy
->static_bo
|| (bo
->domains
& RADEON_GEM_DOMAIN_GTT
)) {
768 int radeon_legacy_bo_is_static(struct radeon_bo
*bo
)
770 struct bo_legacy
*bo_legacy
= (struct bo_legacy
*)bo
;
771 return bo_legacy
->static_bo
;