* Jérôme Glisse <glisse@freedesktop.org>
*/
#include <stdio.h>
+#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include "xf86drm.h"
+#include "texmem.h"
+#include "main/simple_list.h"
+
#include "drm.h"
#include "radeon_drm.h"
-#include "radeon_bo.h"
-#include "radeon_bo_legacy.h"
-#include "radeon_ioctl.h"
-#include "texmem.h"
+#include "radeon_common.h"
+#include "radeon_bocs_wrapper.h"
+
+/* texmem.c's heap-destroy callback hands back only the driTextureObject,
+ * so wrap it with a back-pointer to the owning bo_legacy. */
+/* Pairs a texmem driTextureObject with the BO that owns it so the
+ * texture-heap destroy callback can unlink the two (see
+ * bo_legacy_tobj_destroy). */
+struct bo_legacy_texture_object {
+ driTextureObject base; /* must remain first: callback casts driTextureObject* to this struct */
+ struct bo_legacy *parent; /* owning BO; NULL once the BO has been torn down */
+};
struct bo_legacy {
struct radeon_bo base;
- driTextureObject tobj_base;
int map_count;
uint32_t pending;
int is_pending;
- int validated;
int static_bo;
- int got_dri_texture_obj;
- int dirty;
uint32_t offset;
- driTextureObject dri_texture_obj;
+ struct bo_legacy_texture_object *tobj;
+ int validated;
+ int dirty;
void *ptr;
struct bo_legacy *next, *prev;
struct bo_legacy *pnext, *pprev;
uint32_t fb_location;
uint32_t texture_offset;
unsigned dma_alloc_size;
+ uint32_t dma_buf_count;
unsigned cpendings;
driTextureObject texture_swapped;
driTexHeap *texture_heap;
+/*
+ * Destroy callback installed on the texture heap (driCreateTextureHeap):
+ * 't' is really the base of a bo_legacy_texture_object, so detach it from
+ * the owning BO and mark that BO as no longer validated.  'parent' may be
+ * NULL if the BO was freed before its texture object.
+ */
static void bo_legacy_tobj_destroy(void *data, driTextureObject *t)
{
- struct bo_legacy *bo_legacy;
-
- bo_legacy = (struct bo_legacy*)((char*)t)-sizeof(struct radeon_bo);
- bo_legacy->got_dri_texture_obj = 0;
- bo_legacy->validated = 0;
+ struct bo_legacy_texture_object *tobj = (struct bo_legacy_texture_object *)t;
+
+ if (tobj->parent) {
+ tobj->parent->tobj = NULL;
+ tobj->parent->validated = 0;
+ }
}
+/*
+ * Pop trailing zero (already-recycled) entries off the free-handle stack
+ * so that free_handles[cfree_handles - 1] is always a live handle.
+ */
+static inline void clean_handles(struct bo_manager_legacy *bom)
+{
+ while (bom->cfree_handles > 0 &&
+ !bom->free_handles[bom->cfree_handles - 1])
+ bom->cfree_handles--;
+}
static int legacy_new_handle(struct bo_manager_legacy *bom, uint32_t *handle)
{
uint32_t tmp;
}
if (bom->cfree_handles > 0) {
tmp = bom->free_handles[--bom->cfree_handles];
- while (!bom->free_handles[bom->cfree_handles - 1]) {
- bom->cfree_handles--;
- if (bom->cfree_handles <= 0) {
- bom->cfree_handles = 0;
- }
- }
+ clean_handles(bom);
} else {
bom->cfree_handles = 0;
tmp = bom->nhandle++;
bom->free_handles[i] = 0;
}
}
- while (!bom->free_handles[bom->cfree_handles - 1]) {
- bom->cfree_handles--;
- if (bom->cfree_handles <= 0) {
- bom->cfree_handles = 0;
- }
- }
+ clean_handles(bom);
return 0;
}
if (bom->cfree_handles < bom->nfree_handles) {
drm_radeon_getparam_t gp;
int r;
- gp.param = RADEON_PARAM_LAST_CLEAR;
- gp.value = (int *)&boml->current_age;
- r = drmCommandWriteRead(boml->base.fd, DRM_RADEON_GETPARAM,
- &gp, sizeof(gp));
- if (r) {
- fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, r);
- exit(1);
- }
+ if (IS_R300_CLASS(boml->screen)) {
+ gp.param = RADEON_PARAM_LAST_CLEAR;
+ gp.value = (int *)&boml->current_age;
+ r = drmCommandWriteRead(boml->base.fd, DRM_RADEON_GETPARAM,
+ &gp, sizeof(gp));
+ if (r) {
+ fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__, r);
+ exit(1);
+ }
+ } else
+ boml->current_age = boml->screen->scratch[3];
}
static int legacy_is_pending(struct radeon_bo *bo)
if (bo_legacy->pnext) {
bo_legacy->pnext->pprev = bo_legacy->pprev;
}
+ assert(bo_legacy->is_pending <= bo->cref);
while (bo_legacy->is_pending--) {
- radeon_bo_unref(bo);
+ bo = radeon_bo_unref(bo);
+ if (!bo)
+ break;
}
- bo_legacy->is_pending = 0;
+ if (bo)
+ bo_legacy->is_pending = 0;
boml->cpendings--;
return 0;
}
return 0;
}
-static void legacy_track_pending(struct bo_manager_legacy *boml)
+static void legacy_track_pending(struct bo_manager_legacy *boml, int debug)
{
struct bo_legacy *bo_legacy;
struct bo_legacy *next;
legacy_get_current_age(boml);
bo_legacy = boml->pending_bos.pnext;
while (bo_legacy) {
+ if (debug)
+ fprintf(stderr,"pending %p %d %d %d\n", bo_legacy, bo_legacy->base.size,
+ boml->current_age, bo_legacy->pending);
next = bo_legacy->pnext;
if (legacy_is_pending(&(bo_legacy->base))) {
}
}
}
+/*
+ * Wait for the first entry on the pending-BO list to retire (presumably
+ * the oldest pending BO — TODO confirm list ordering), refreshing the
+ * current age first.  Returns 0 on success, -1 if nothing is pending so
+ * the caller knows no memory can be reclaimed by waiting.
+ */
+static int legacy_wait_any_pending(struct bo_manager_legacy *boml)
+{
+ struct bo_legacy *bo_legacy;
+
+ legacy_get_current_age(boml);
+ bo_legacy = boml->pending_bos.pnext;
+ if (!bo_legacy)
+ return -1;
+ legacy_wait_pending(&bo_legacy->base);
+ return 0;
+}
+
+/*
+ * Evict every validated texture BO from the VRAM heap so a later
+ * validation pass starts from an empty aperture.  Used as a last resort
+ * when relocations cannot be fit (see radeon_bo_legacy_validate, which
+ * returns -EAGAIN after calling this).  Destroying the texture object
+ * fires bo_legacy_tobj_destroy; the explicit resets here keep the BO
+ * state consistent regardless of callback ordering.
+ */
+static void legacy_kick_all_buffers(struct bo_manager_legacy *boml)
+{
+ struct bo_legacy *legacy;
+
+ legacy = boml->bos.next;
+ while (legacy != &boml->bos) {
+ if (legacy->tobj && legacy->validated) {
+ driDestroyTextureObject(&legacy->tobj->base);
+ legacy->tobj = NULL;
+ legacy->validated = 0;
+ }
+ legacy = legacy->next;
+ }
+}
+
static struct bo_legacy *bo_allocate(struct bo_manager_legacy *boml,
uint32_t size,
uint32_t alignment,
uint32_t flags)
{
struct bo_legacy *bo_legacy;
+ static int pgsize;
+
+ if (pgsize == 0)
+ pgsize = getpagesize() - 1;
+
+ size = (size + pgsize) & ~pgsize;
bo_legacy = (struct bo_legacy*)calloc(1, sizeof(struct bo_legacy));
if (bo_legacy == NULL) {
bo_legacy->map_count = 0;
bo_legacy->next = NULL;
bo_legacy->prev = NULL;
- bo_legacy->got_dri_texture_obj = 0;
bo_legacy->pnext = NULL;
bo_legacy->pprev = NULL;
bo_legacy->next = boml->bos.next;
if (r) {
/* ptr is set to NULL if dma allocation failed */
bo_legacy->ptr = NULL;
- exit(0);
return r;
}
bo_legacy->ptr = boml->screen->gartTextures.map + base_offset;
bo_legacy->offset = boml->screen->gart_texture_offset + base_offset;
bo->size = size;
boml->dma_alloc_size += size;
+ boml->dma_buf_count++;
return 0;
}
return r;
}
boml->dma_alloc_size -= bo_legacy->base.size;
+ boml->dma_buf_count--;
return 0;
}
/* dma buffers */
bo_dma_free(&bo_legacy->base);
} else {
+ driDestroyTextureObject(&bo_legacy->tobj->base);
+ bo_legacy->tobj = NULL;
/* free backing store */
free(bo_legacy->ptr);
}
return NULL;
}
if (bo_legacy->base.domains & RADEON_GEM_DOMAIN_GTT) {
- legacy_track_pending(boml);
+ retry:
+ legacy_track_pending(boml, 0);
/* dma buffers */
+
r = bo_dma_alloc(&(bo_legacy->base));
if (r) {
- fprintf(stderr, "Ran out of GART memory (for %d)!\n", size);
- fprintf(stderr, "Please consider adjusting GARTSize option.\n");
+ if (legacy_wait_any_pending(boml) == -1) {
bo_free(bo_legacy);
- exit(-1);
- return NULL;
+ return NULL;
+ }
+ goto retry;
+ return NULL;
}
} else {
bo_legacy->ptr = malloc(bo_legacy->base.size);
{
}
-static void bo_unref(struct radeon_bo *bo)
+static struct radeon_bo *bo_unref(struct radeon_bo *bo)
{
struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
if (!bo_legacy->is_pending) {
bo_free(bo_legacy);
}
+ return NULL;
}
+ return bo;
}
static int bo_map(struct radeon_bo *bo, int write)
* framebuffer, but I've found this to be unnecessary.
* -- Nicolai Hähnle, June 2008
*/
- {
+ if (!(bo->domains & RADEON_GEM_DOMAIN_GTT)) {
int p;
volatile int *buf = (int*)boml->screen->driScreen->pFB;
p = *buf;
}
-
return 0;
}
struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
int r;
+ int retry_count = 0, pending_retry = 0;
- if (!bo_legacy->got_dri_texture_obj) {
- make_empty_list(&bo_legacy->dri_texture_obj);
- bo_legacy->dri_texture_obj.totalSize = bo->size;
+ if (!bo_legacy->tobj) {
+ bo_legacy->tobj = CALLOC(sizeof(struct bo_legacy_texture_object));
+ bo_legacy->tobj->parent = bo_legacy;
+ make_empty_list(&bo_legacy->tobj->base);
+ bo_legacy->tobj->base.totalSize = bo->size;
+ retry:
r = driAllocateTexture(&boml->texture_heap, 1,
- &bo_legacy->dri_texture_obj);
+ &bo_legacy->tobj->base);
if (r) {
- uint8_t *segfault=NULL;
- fprintf(stderr, "Ouch! vram_validate failed %d\n", r);
- *segfault=1;
- return -1;
- }
+ pending_retry = 0;
+ while(boml->cpendings && pending_retry++ < 10000) {
+ legacy_track_pending(boml, 0);
+ retry_count++;
+ if (retry_count > 2) {
+ free(bo_legacy->tobj);
+ bo_legacy->tobj = NULL;
+ fprintf(stderr, "Ouch! vram_validate failed %d\n", r);
+ return -1;
+ }
+ goto retry;
+ }
+ }
bo_legacy->offset = boml->texture_offset +
- bo_legacy->dri_texture_obj.memBlock->ofs;
- bo_legacy->got_dri_texture_obj = 1;
+ bo_legacy->tobj->base.memBlock->ofs;
bo_legacy->dirty = 1;
}
- if (bo_legacy->dirty) {
+
+ assert(bo_legacy->tobj->base.memBlock);
+
+ if (bo_legacy->tobj)
+ driUpdateTextureLRU(&bo_legacy->tobj->base);
+
+ if (bo_legacy->dirty || bo_legacy->tobj->base.dirty_images[0]) {
/* Copy to VRAM using a blit.
* All memory is 4K aligned. We're using 1024 pixels wide blits.
*/
}
} while (ret == -EAGAIN);
bo_legacy->dirty = 0;
+ bo_legacy->tobj->base.dirty_images[0] = 0;
}
return 0;
}
+/*
+ * radeon_bo_legacy_validate -
+ * returns:
+ * 0 - all good
+ * -EINVAL - mapped buffer can't be validated
+ * -EAGAIN - restart validation we've kicked all the buffers out
+ */
int radeon_bo_legacy_validate(struct radeon_bo *bo,
uint32_t *soffset,
uint32_t *eoffset)
{
+ struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bo->bom;
struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
int r;
+ int retries = 0;
if (bo_legacy->map_count) {
fprintf(stderr, "bo(%p, %d) is mapped (%d) can't valide it.\n",
return 0;
}
if (!(bo->domains & RADEON_GEM_DOMAIN_GTT)) {
+
r = bo_vram_validate(bo, soffset, eoffset);
if (r) {
- return r;
+ legacy_track_pending(boml, 0);
+ legacy_kick_all_buffers(boml);
+ retries++;
+ if (retries == 2) {
+ fprintf(stderr,"legacy bo: failed to get relocations into aperture\n");
+ assert(0);
+ exit(-1);
+ }
+ return -EAGAIN;
}
}
*soffset = bo_legacy->offset;
struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
bo_legacy->pending = pending;
- bo_legacy->is_pending += 1;
+ bo_legacy->is_pending++;
/* add to pending list */
radeon_bo_ref(bo);
if (bo_legacy->is_pending > 1) {
boml->cpendings++;
}
-void radeon_bo_manager_legacy_shutdown(struct radeon_bo_manager *bom)
+void radeon_bo_manager_legacy_dtor(struct radeon_bo_manager *bom)
{
struct bo_manager_legacy *boml = (struct bo_manager_legacy *)bom;
struct bo_legacy *bo_legacy;
bo_free(bo_legacy);
bo_legacy = next;
}
+ driDestroyTextureHeap(boml->texture_heap);
free(boml->free_handles);
free(boml);
}
-struct radeon_bo_manager *radeon_bo_manager_legacy(struct radeon_screen *scrn)
+/*
+ * Create a BO wrapping one of the fixed on-card surfaces (front/back/
+ * depth buffer).  The BO owns no backing store: 'ptr' aliases the mapped
+ * framebuffer at 'offset', and the absolute card offset doubles as the
+ * handle (nhandle is bumped past it so dynamic handles never collide).
+ * A reference is taken so the static BO survives caller unrefs.
+ * Returns NULL if the underlying allocation fails.
+ *
+ * NOTE(review): bo_allocate is called with five arguments here but the
+ * prototype visible earlier in this patch shows four — the domains
+ * parameter appears to have been dropped by hunk truncation; confirm
+ * against the full file.
+ */
+static struct bo_legacy *radeon_legacy_bo_alloc_static(struct bo_manager_legacy *bom,
+ int size, uint32_t offset)
+{
+ struct bo_legacy *bo;
+
+ bo = bo_allocate(bom, size, 0, RADEON_GEM_DOMAIN_VRAM, 0);
+ if (bo == NULL)
+ return NULL;
+ bo->static_bo = 1;
+ bo->offset = offset + bom->fb_location;
+ bo->base.handle = bo->offset;
+ bo->ptr = bom->screen->driScreen->pFB + offset;
+ if (bo->base.handle > bom->nhandle) {
+ bom->nhandle = bo->base.handle + 1;
+ }
+ radeon_bo_ref(&(bo->base));
+ return bo;
+}
+
+struct radeon_bo_manager *radeon_bo_manager_legacy_ctor(struct radeon_screen *scrn)
{
struct bo_manager_legacy *bom;
struct bo_legacy *bo;
return NULL;
}
+ make_empty_list(&bom->texture_swapped);
+
bom->texture_heap = driCreateTextureHeap(0,
bom,
scrn->texSize[0],
(drmTextureRegionPtr)scrn->sarea->tex_list[0],
&scrn->sarea->tex_age[0],
&bom->texture_swapped,
- sizeof(struct bo_legacy),
+ sizeof(struct bo_legacy_texture_object),
&bo_legacy_tobj_destroy);
bom->texture_offset = scrn->texOffset[0];
bom->nfree_handles = 0x400;
bom->free_handles = (uint32_t*)malloc(bom->nfree_handles * 4);
if (bom->free_handles == NULL) {
- radeon_bo_manager_legacy_shutdown((struct radeon_bo_manager*)bom);
+ radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
return NULL;
}
/* biggest framebuffer size */
size = 4096*4096*4;
+
/* allocate front */
- bo = bo_allocate(bom, size, 0, RADEON_GEM_DOMAIN_VRAM, 0);
- if (bo == NULL) {
- radeon_bo_manager_legacy_shutdown((struct radeon_bo_manager*)bom);
+ bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->frontOffset);
+ if (!bo) {
+ radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
return NULL;
}
if (scrn->sarea->tiling_enabled) {
bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
}
- bo->static_bo = 1;
- bo->offset = bom->screen->frontOffset + bom->fb_location;
- bo->base.handle = bo->offset;
- bo->ptr = scrn->driScreen->pFB + bom->screen->frontOffset;
- if (bo->base.handle > bom->nhandle) {
- bom->nhandle = bo->base.handle + 1;
- }
+
/* allocate back */
- bo = bo_allocate(bom, size, 0, RADEON_GEM_DOMAIN_VRAM, 0);
- if (bo == NULL) {
- radeon_bo_manager_legacy_shutdown((struct radeon_bo_manager*)bom);
+ bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->backOffset);
+ if (!bo) {
+ radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
return NULL;
}
if (scrn->sarea->tiling_enabled) {
bo->base.flags = RADEON_BO_FLAGS_MACRO_TILE;
}
- bo->static_bo = 1;
- bo->offset = bom->screen->backOffset + bom->fb_location;
- bo->base.handle = bo->offset;
- bo->ptr = scrn->driScreen->pFB + bom->screen->backOffset;
- if (bo->base.handle > bom->nhandle) {
- bom->nhandle = bo->base.handle + 1;
- }
+
/* allocate depth */
- bo = bo_allocate(bom, size, 0, RADEON_GEM_DOMAIN_VRAM, 0);
- if (bo == NULL) {
- radeon_bo_manager_legacy_shutdown((struct radeon_bo_manager*)bom);
+ bo = radeon_legacy_bo_alloc_static(bom, size, bom->screen->depthOffset);
+ if (!bo) {
+ radeon_bo_manager_legacy_dtor((struct radeon_bo_manager*)bom);
return NULL;
}
bo->base.flags = 0;
bo->base.flags |= RADEON_BO_FLAGS_MACRO_TILE;
bo->base.flags |= RADEON_BO_FLAGS_MICRO_TILE;
}
- bo->static_bo = 1;
- bo->offset = bom->screen->depthOffset + bom->fb_location;
- bo->base.handle = bo->offset;
- bo->ptr = scrn->driScreen->pFB + bom->screen->depthOffset;
- if (bo->base.handle > bom->nhandle) {
- bom->nhandle = bo->base.handle + 1;
- }
return (struct radeon_bo_manager*)bom;
}
}
return bo->size;
}
+
+/* Report whether 'bo' wraps a fixed on-card surface (front/back/depth,
+ * flagged by radeon_legacy_bo_alloc_static) rather than a dynamically
+ * allocated buffer.  Returns non-zero for static BOs. */
+int radeon_legacy_bo_is_static(struct radeon_bo *bo)
+{
+ struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
+ return bo_legacy->static_bo;
+}
+