#include <sys/mman.h>
#include <sys/ioctl.h>
#include "xf86drm.h"
+#include "texmem.h"
+#include "main/simple_list.h"
+
#include "drm.h"
#include "radeon_drm.h"
-#include "radeon_bo.h"
-#include "radeon_bo_legacy.h"
-#include "radeon_ioctl.h"
-#include "texmem.h"
+#include "radeon_common.h"
+#include "radeon_bocs_wrapper.h"
+
struct bo_legacy {
struct radeon_bo base;
uint32_t fb_location;
uint32_t texture_offset;
unsigned dma_alloc_size;
+ uint32_t dma_buf_count;
unsigned cpendings;
driTextureObject texture_swapped;
driTexHeap *texture_heap;
bo_legacy->validated = 0;
}
+/* Trim trailing already-recycled (zero) entries off the free-handle
+ * stack so cfree_handles always points one past the last live handle.
+ * Centralizes the trimming loop; unlike the open-coded versions it
+ * replaces, the bounds check comes first, so free_handles[-1] is
+ * never read when the stack is empty. */
+static inline void clean_handles(struct bo_manager_legacy *bom)
+{
+    while (bom->cfree_handles > 0 &&
+           !bom->free_handles[bom->cfree_handles - 1])
+        bom->cfree_handles--;
+}
static int legacy_new_handle(struct bo_manager_legacy *bom, uint32_t *handle)
{
uint32_t tmp;
}
if (bom->cfree_handles > 0) {
tmp = bom->free_handles[--bom->cfree_handles];
- while (!bom->free_handles[bom->cfree_handles - 1]) {
- bom->cfree_handles--;
- if (bom->cfree_handles <= 0) {
- bom->cfree_handles = 0;
- }
- }
+ clean_handles(bom);
} else {
bom->cfree_handles = 0;
tmp = bom->nhandle++;
bom->free_handles[i] = 0;
}
}
- while (!bom->free_handles[bom->cfree_handles - 1]) {
- bom->cfree_handles--;
- if (bom->cfree_handles <= 0) {
- bom->cfree_handles = 0;
- }
- }
+ clean_handles(bom);
return 0;
}
if (bom->cfree_handles < bom->nfree_handles) {
drm_radeon_getparam_t gp;
int r;
- gp.param = RADEON_PARAM_LAST_CLEAR;
- gp.value = (int *)&boml->current_age;
- r = drmCommandWriteRead(boml->base.fd, DRM_RADEON_GETPARAM,
- &gp, sizeof(gp));
- if (r) {
- fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, r);
- exit(1);
- }
+ if (IS_R300_CLASS(boml->screen)) {
+ gp.param = RADEON_PARAM_LAST_CLEAR;
+ gp.value = (int *)&boml->current_age;
+ r = drmCommandWriteRead(boml->base.fd, DRM_RADEON_GETPARAM,
+ &gp, sizeof(gp));
+ if (r) {
+ fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, r);
+ exit(1);
+ }
+ } else
+ boml->current_age = boml->screen->scratch[3];
}
static int legacy_is_pending(struct radeon_bo *bo)
if (bo_legacy->pnext) {
bo_legacy->pnext->pprev = bo_legacy->pprev;
}
+ assert(bo_legacy->is_pending <= bo->cref);
while (bo_legacy->is_pending--) {
- radeon_bo_unref(bo);
+ bo = radeon_bo_unref(bo);
+ if (!bo)
+ break;
}
- bo_legacy->is_pending = 0;
+ if (bo)
+ bo_legacy->is_pending = 0;
boml->cpendings--;
return 0;
}
return 0;
}
-static void legacy_track_pending(struct bo_manager_legacy *boml)
+static void legacy_track_pending(struct bo_manager_legacy *boml, int debug)
{
struct bo_legacy *bo_legacy;
struct bo_legacy *next;
legacy_get_current_age(boml);
bo_legacy = boml->pending_bos.pnext;
while (bo_legacy) {
+ if (debug)
+ fprintf(stderr,"pending %p %d %d %d\n", bo_legacy, bo_legacy->base.size,
+ boml->current_age, bo_legacy->pending);
next = bo_legacy->pnext;
if (legacy_is_pending(&(bo_legacy->base))) {
}
}
}
+/* Block until the oldest pending buffer object retires, freeing its
+ * memory for reuse.  Used by the GTT allocation path to retry after a
+ * failed DMA allocation.
+ *
+ * Returns 0 after waiting on the head of the pending list, or -1 when
+ * nothing is pending (callers treat this as "give up retrying"). */
+static int legacy_wait_any_pending(struct bo_manager_legacy *boml)
+{
+    struct bo_legacy *bo_legacy;
+
+    legacy_get_current_age(boml);
+    bo_legacy = boml->pending_bos.pnext;
+    if (!bo_legacy)
+        return -1;
+    legacy_wait_pending(&bo_legacy->base);
+    return 0;
+}
+
+
static struct bo_legacy *bo_allocate(struct bo_manager_legacy *boml,
uint32_t size,
uint32_t alignment,
+ uint32_t domains,
uint32_t flags)
{
struct bo_legacy *bo_legacy;
+ static int pgsize;
+
+ if (pgsize == 0)
+ pgsize = getpagesize() - 1;
+
+ size = (size + pgsize) & ~pgsize;
bo_legacy = (struct bo_legacy*)calloc(1, sizeof(struct bo_legacy));
if (bo_legacy == NULL) {
bo_legacy->base.handle = 0;
bo_legacy->base.size = size;
bo_legacy->base.alignment = alignment;
+ bo_legacy->base.domains = domains;
bo_legacy->base.flags = flags;
bo_legacy->base.ptr = NULL;
bo_legacy->map_count = 0;
if (r) {
/* ptr is set to NULL if dma allocation failed */
bo_legacy->ptr = NULL;
- exit(0);
return r;
}
bo_legacy->ptr = boml->screen->gartTextures.map + base_offset;
bo_legacy->offset = boml->screen->gart_texture_offset + base_offset;
bo->size = size;
boml->dma_alloc_size += size;
+ boml->dma_buf_count++;
return 0;
}
return r;
}
boml->dma_alloc_size -= bo_legacy->base.size;
+ boml->dma_buf_count--;
return 0;
}
}
if (!bo_legacy->static_bo) {
legacy_free_handle(boml, bo_legacy->base.handle);
- if (bo_legacy->base.flags & RADEON_GEM_DOMAIN_GTT) {
+ if (bo_legacy->base.domains & RADEON_GEM_DOMAIN_GTT) {
/* dma buffers */
bo_dma_free(&bo_legacy->base);
} else {
uint32_t handle,
uint32_t size,
uint32_t alignment,
+ uint32_t domains,
uint32_t flags)
{
struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
return NULL;
}
- bo_legacy = bo_allocate(boml, size, alignment, flags);
+ bo_legacy = bo_allocate(boml, size, alignment, domains, flags);
bo_legacy->static_bo = 0;
r = legacy_new_handle(boml, &bo_legacy->base.handle);
if (r) {
bo_free(bo_legacy);
return NULL;
}
- if (bo_legacy->base.flags & RADEON_GEM_DOMAIN_GTT) {
- legacy_track_pending(boml);
+ if (bo_legacy->base.domains & RADEON_GEM_DOMAIN_GTT) {
+ retry:
+ legacy_track_pending(boml, 0);
/* dma buffers */
+
r = bo_dma_alloc(&(bo_legacy->base));
if (r) {
- fprintf(stderr, "Ran out of GART memory (for %d)!\n", size);
- fprintf(stderr, "Please consider adjusting GARTSize option.\n");
+ if (legacy_wait_any_pending(boml) == -1) {
bo_free(bo_legacy);
- exit(-1);
- return NULL;
+ return NULL;
+ }
+ goto retry;
+ return NULL;
}
} else {
bo_legacy->ptr = malloc(bo_legacy->base.size);
{
}
-static void bo_unref(struct radeon_bo *bo)
+static struct radeon_bo *bo_unref(struct radeon_bo *bo)
{
struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
if (!bo_legacy->is_pending) {
bo_free(bo_legacy);
}
+ return NULL;
}
+ return bo;
}
static int bo_map(struct radeon_bo *bo, int write)
* framebuffer, but I've found this to be unnecessary.
* -- Nicolai Hähnle, June 2008
*/
- {
+ if (!(bo->domains & RADEON_GEM_DOMAIN_GTT)) {
int p;
volatile int *buf = (int*)boml->screen->driScreen->pFB;
p = *buf;
}
-
return 0;
}
*eoffset = bo_legacy->offset + bo->size;
return 0;
}
- if (!(bo->flags & RADEON_GEM_DOMAIN_GTT)) {
+ if (!(bo->domains & RADEON_GEM_DOMAIN_GTT)) {
r = bo_vram_validate(bo, soffset, eoffset);
if (r) {
return r;
struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
bo_legacy->pending = pending;
- bo_legacy->is_pending += 1;
+ bo_legacy->is_pending++;
/* add to pending list */
radeon_bo_ref(bo);
if (bo_legacy->is_pending > 1) {
boml->cpendings++;
}
-void radeon_bo_manager_legacy_shutdown(struct radeon_bo_manager *bom)
+void radeon_bo_manager_legacy_dtor(struct radeon_bo_manager *bom)
{
struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
struct bo_legacy *bo_legacy;
free(boml);
}
-struct radeon_bo_manager *radeon_bo_manager_legacy(struct radeon_screen *scrn)
+/* Allocate a bo_legacy that aliases a fixed region of the on-card
+ * framebuffer (front/back/depth buffer) instead of dynamically
+ * managed memory.  offset is relative to the framebuffer aperture.
+ *
+ * The absolute card address (offset + fb_location) doubles as the bo
+ * handle, so nhandle is bumped past it to keep dynamically allocated
+ * handles from colliding with static ones.  The bo is pinned with an
+ * extra reference here -- NOTE(review): presumably released only at
+ * manager teardown; confirm against radeon_bo_manager_legacy_dtor.
+ *
+ * Returns NULL when the underlying bo allocation fails. */
+static struct bo_legacy *radeon_legacy_bo_alloc_static(struct bo_manager_legacy *bom,
+						       int size, uint32_t offset)
+{
+    struct bo_legacy *bo;
+
+    bo = bo_allocate(bom, size, 0, RADEON_GEM_DOMAIN_VRAM, 0);
+    if (bo == NULL)
+        return NULL;
+    bo->static_bo = 1;
+    /* card-relative address; also reused as the unique handle below */
+    bo->offset = offset + bom->fb_location;
+    bo->base.handle = bo->offset;
+    /* CPU view: framebuffer mapping plus the same aperture offset */
+    bo->ptr = bom->screen->driScreen->pFB + offset;
+    if (bo->base.handle > bom->nhandle) {
+        bom->nhandle = bo->base.handle + 1;
+    }
+    radeon_bo_ref(&(bo->base));
+    return bo;
+}
+
+struct radeon_bo_manager *radeon_bo_manager_legacy_ctor(struct radeon_screen *scrn)
{
struct bo_manager_legacy *bom;
struct bo_legacy *bo;
bom->nfree_handles = 0x400;
bom->free_handles = (uint32_t*)malloc(bom->nfree_handles * 4);
if (bom->free_handles == NULL) {
- radeon_bo_manager_legacy_shutdown((struct radeon_bo_manager*)bom);
+ radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
return NULL;
}
/* biggest framebuffer size */
size = 4096*4096*4;
+
/* allocate front */
- bo = bo_allocate(bom, size, 0, 0);
- if (bo == NULL) {
- radeon_bo_manager_legacy_shutdown((struct radeon_bo_manager*)bom);
+ bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->frontOffset);
+ if (!bo) {
+ radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
return NULL;
}
if (scrn->sarea->tiling_enabled) {
bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
}
- bo->static_bo = 1;
- bo->offset = bom->screen->frontOffset + bom->fb_location;
- bo->base.handle = bo->offset;
- bo->ptr = scrn->driScreen->pFB + bom->screen->frontOffset;
- if (bo->base.handle > bom->nhandle) {
- bom->nhandle = bo->base.handle + 1;
- }
+
/* allocate back */
- bo = bo_allocate(bom, size, 0, 0);
- if (bo == NULL) {
- radeon_bo_manager_legacy_shutdown((struct radeon_bo_manager*)bom);
+ bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->backOffset);
+ if (!bo) {
+ radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
return NULL;
}
if (scrn->sarea->tiling_enabled) {
bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
}
- bo->static_bo = 1;
- bo->offset = bom->screen->backOffset + bom->fb_location;
- bo->base.handle = bo->offset;
- bo->ptr = scrn->driScreen->pFB + bom->screen->backOffset;
- if (bo->base.handle > bom->nhandle) {
- bom->nhandle = bo->base.handle + 1;
- }
+
/* allocate depth */
- bo = bo_allocate(bom, size, 0, 0);
- if (bo == NULL) {
- radeon_bo_manager_legacy_shutdown((struct radeon_bo_manager*)bom);
+ bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->depthOffset);
+ if (!bo) {
+ radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
return NULL;
}
bo->base.flags = 0;
if (scrn->sarea->tiling_enabled) {
- bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
- }
- bo->static_bo = 1;
- bo->offset = bom->screen->depthOffset + bom->fb_location;
- bo->base.handle = bo->offset;
- bo->ptr = scrn->driScreen->pFB + bom->screen->depthOffset;
- if (bo->base.handle > bom->nhandle) {
- bom->nhandle = bo->base.handle + 1;
+ bo->base.flags |= RADEON_BO_FLAGS_MACRO_TILE;
+ bo->base.flags |= RADEON_BO_FLAGS_MICRO_TILE;
}
return (struct radeon_bo_manager*)bom;
}
{
struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
- if (bo_legacy->static_bo || (bo->flags & RADEON_GEM_DOMAIN_GTT)) {
+ if (bo_legacy->static_bo || (bo->domains & RADEON_GEM_DOMAIN_GTT)) {
return 0;
}
return bo->size;
}
+
+/* Report whether a bo aliases a fixed framebuffer region (front/back/
+ * depth) rather than dynamically managed memory.  Nonzero for static
+ * buffers, zero otherwise. */
+int radeon_legacy_bo_is_static(struct radeon_bo *bo)
+{
+    return ((struct bo_legacy *)bo)->static_bo;
+}
+