EGL_SOURCES = server/radeon_egl.c
endif
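+# Assumption noted for clarity: an empty RADEON_LDFLAGS means no system
+# libdrm_radeon is linked in, so build the local copy of the CS space code.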
+ifeq ($(RADEON_LDFLAGS),)
+CS_SOURCES = radeon_cs_space_drm.c
+endif
+
RADEON_COMMON_SOURCES = \
radeon_texture.c \
radeon_common_context.c \
radeon_screen.c \
$(EGL_SOURCES) \
-	$(RADEON_COMMON_SOURCES)
+	$(RADEON_COMMON_SOURCES) \
+	$(CS_SOURCES)
C_SOURCES = $(COMMON_SOURCES) $(DRIVER_SOURCES)
radeon_texture.h \
radeon_dma.c \
radeon_dma.h \
- radeon_fbo.c
+ radeon_fbo.c \
+ $(CS_SOURCES)
DRI_LIB_DEPS += $(RADEON_LDFLAGS)
GLuint min_nr )
{
GLushort *retval;
+ int ret;
if (R200_DEBUG & DEBUG_IOCTL)
fprintf(stderr, "%s %d prim %x\n", __FUNCTION__, min_nr, primitive);
rmesa->radeon.tcl.elt_dma_offset = 0;
rmesa->tcl.elt_used = min_nr * 2;
- radeon_validate_bo(&rmesa->radeon, rmesa->radeon.tcl.elt_dma_bo,
- RADEON_GEM_DOMAIN_GTT, 0);
- if (radeon_revalidate_bos(rmesa->radeon.glCtx) == GL_FALSE)
+ ret = radeon_cs_space_check_with_bo(rmesa->radeon.cmdbuf.cs, rmesa->radeon.tcl.elt_dma_bo,
+ RADEON_GEM_DOMAIN_GTT, 0);
+ if (ret) {
fprintf(stderr,"failure to revalidate BOs - badness\n");
+ }
radeon_bo_map(rmesa->radeon.tcl.elt_dma_bo, 1);
retval = rmesa->radeon.tcl.elt_dma_bo->ptr + rmesa->radeon.tcl.elt_dma_offset;
{
r200ContextPtr rmesa = R200_CONTEXT(ctx);
struct radeon_renderbuffer *rrb;
- int i;
+ int i, ret;
- radeon_validate_reset_bos(&rmesa->radeon);
+ radeon_cs_space_reset_bos(rmesa->radeon.cmdbuf.cs);
rrb = radeon_get_colorbuffer(&rmesa->radeon);
/* color buffer */
if (rrb && rrb->bo) {
- radeon_validate_bo(&rmesa->radeon, rrb->bo,
- 0, RADEON_GEM_DOMAIN_VRAM);
+ radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs, rrb->bo,
+ 0, RADEON_GEM_DOMAIN_VRAM);
}
/* depth buffer */
rrb = radeon_get_depthbuffer(&rmesa->radeon);
if (rrb && rrb->bo) {
- radeon_validate_bo(&rmesa->radeon, rrb->bo,
- 0, RADEON_GEM_DOMAIN_VRAM);
+ radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs, rrb->bo,
+ 0, RADEON_GEM_DOMAIN_VRAM);
}
for (i = 0; i < ctx->Const.MaxTextureImageUnits; ++i) {
t = radeon_tex_obj(ctx->Texture.Unit[i]._Current);
if (t->image_override && t->bo)
- radeon_validate_bo(&rmesa->radeon, t->bo,
+ radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs, t->bo,
RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0);
else if (t->mt->bo)
- radeon_validate_bo(&rmesa->radeon, t->mt->bo,
+ radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs, t->mt->bo,
RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0);
}
- if (rmesa->radeon.dma.current)
- radeon_validate_bo(&rmesa->radeon, rmesa->radeon.dma.current, RADEON_GEM_DOMAIN_GTT, 0);
-
- return radeon_revalidate_bos(ctx);
+ ret = radeon_cs_space_check_with_bo(rmesa->radeon.cmdbuf.cs, rmesa->radeon.dma.current, RADEON_GEM_DOMAIN_GTT, 0);
+ if (ret)
+ return GL_FALSE;
+ return GL_TRUE;
}
GLboolean r200ValidateState( GLcontext *ctx )
EGL_SOURCES = server/radeon_egl.c
endif
+ifeq ($(RADEON_LDFLAGS),)
+CS_SOURCES = radeon_cs_space_drm.c
+endif
+
COMMON_SOURCES = \
../../common/driverfuncs.c \
../common/mm.c \
r300_emit.c \
r300_swtcl.c \
$(RADEON_COMMON_SOURCES) \
- $(EGL_SOURCES)
+ $(EGL_SOURCES) \
+ $(CS_SOURCES)
C_SOURCES = $(COMMON_SOURCES) $(DRIVER_SOURCES)
radeon_mipmap_tree.h \
radeon_texture.c \
radeon_texture.h \
- radeon_fbo.c
+ radeon_fbo.c \
+ $(CS_SOURCES)
DRI_LIB_DEPS += $(RADEON_LDFLAGS)
}
}
-static void r300KernelClear(GLcontext *ctx, GLuint flags)
+static int r300KernelClear(GLcontext *ctx, GLuint flags)
{
r300ContextPtr r300 = R300_CONTEXT(ctx);
__DRIdrawablePrivate *dPriv = radeon_get_drawable(&r300->radeon);
struct radeon_framebuffer *rfb = dPriv->driverPrivate;
struct radeon_renderbuffer *rrb;
struct radeon_renderbuffer *rrbd;
- int bits = 0;
+ int bits = 0, ret;
/* Make sure it fits there. */
+ radeon_cs_space_reset_bos(r300->radeon.cmdbuf.cs);
+
+ if (flags & BUFFER_BIT_COLOR0) {
+ rrb = radeon_get_renderbuffer(&rfb->base, BUFFER_COLOR0);
+ radeon_cs_space_add_persistent_bo(r300->radeon.cmdbuf.cs,
+ rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM);
+ }
+
+ if (flags & BUFFER_BIT_FRONT_LEFT) {
+ rrb = radeon_get_renderbuffer(&rfb->base, BUFFER_FRONT_LEFT);
+ radeon_cs_space_add_persistent_bo(r300->radeon.cmdbuf.cs,
+ rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM);
+ }
+
+ if (flags & BUFFER_BIT_BACK_LEFT) {
+ rrb = radeon_get_renderbuffer(&rfb->base, BUFFER_BACK_LEFT);
+ radeon_cs_space_add_persistent_bo(r300->radeon.cmdbuf.cs,
+ rrb->bo, 0, RADEON_GEM_DOMAIN_VRAM);
+ }
+
+ rrbd = radeon_get_renderbuffer(&rfb->base, BUFFER_DEPTH);
+ if (rrbd) {
+ radeon_cs_space_add_persistent_bo(r300->radeon.cmdbuf.cs,
+ rrbd->bo, 0, RADEON_GEM_DOMAIN_VRAM);
+ }
+
+ ret = radeon_cs_space_check(r300->radeon.cmdbuf.cs);
+ if (ret)
+ return -1;
+
rcommonEnsureCmdBufSpace(&r300->radeon, 421 * 3, __FUNCTION__);
if (flags || bits)
r300EmitClearState(ctx);
+
rrbd = radeon_get_renderbuffer(&rfb->base, BUFFER_DEPTH);
if (rrbd && (flags & BUFFER_BIT_DEPTH))
bits |= CLEARBUFFER_DEPTH;
r300ClearBuffer(r300, bits, NULL, rrbd);
COMMIT_BATCH();
+ return 0;
}
/**
__DRIdrawablePrivate *dPriv = radeon_get_drawable(&r300->radeon);
const GLuint colorMask = *((GLuint *) & ctx->Color.ColorMask);
GLbitfield swrast_mask = 0, tri_mask = 0;
- int i;
+ int i, ret;
struct gl_framebuffer *fb = ctx->DrawBuffer;
if (RADEON_DEBUG & DEBUG_IOCTL)
/* SW fallback clearing */
swrast_mask = mask & ~tri_mask;
+ ret = 0;
if (tri_mask) {
if (r300->radeon.radeonScreen->kernel_mm)
r300UserClear(ctx, tri_mask);
- else
- r300KernelClear(ctx, tri_mask);
+ else {
+		/* if the kernel clear fails due to size constraints, fall back to swrast */
+ ret = r300KernelClear(ctx, tri_mask);
+ if (ret < 0)
+ swrast_mask |= tri_mask;
+ }
}
+
if (swrast_mask) {
if (RADEON_DEBUG & DEBUG_FALLBACKS)
fprintf(stderr, "%s: swrast clear, mask: %x\n",
r300ContextPtr rmesa = R300_CONTEXT(ctx);
struct radeon_renderbuffer *rrb;
int i;
+ int ret;
- radeon_validate_reset_bos(&rmesa->radeon);
+ radeon_cs_space_reset_bos(rmesa->radeon.cmdbuf.cs);
rrb = radeon_get_colorbuffer(&rmesa->radeon);
/* color buffer */
if (rrb && rrb->bo) {
- radeon_validate_bo(&rmesa->radeon, rrb->bo,
- 0, RADEON_GEM_DOMAIN_VRAM);
+ radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
+ rrb->bo, 0,
+ RADEON_GEM_DOMAIN_VRAM);
}
/* depth buffer */
rrb = radeon_get_depthbuffer(&rmesa->radeon);
if (rrb && rrb->bo) {
- radeon_validate_bo(&rmesa->radeon, rrb->bo,
- 0, RADEON_GEM_DOMAIN_VRAM);
+ radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
+ rrb->bo, 0,
+ RADEON_GEM_DOMAIN_VRAM);
}
for (i = 0; i < ctx->Const.MaxTextureImageUnits; ++i) {
}
t = radeon_tex_obj(ctx->Texture.Unit[i]._Current);
if (t->image_override && t->bo)
- radeon_validate_bo(&rmesa->radeon, t->bo,
- RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0);
-
+ radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
+ t->bo,
+ RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0);
else if (t->mt->bo)
- radeon_validate_bo(&rmesa->radeon, t->mt->bo,
- RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0);
+ radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs,
+ t->mt->bo,
+ RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0);
}
- if (rmesa->radeon.dma.current)
- radeon_validate_bo(&rmesa->radeon, rmesa->radeon.dma.current, RADEON_GEM_DOMAIN_GTT, 0);
- return radeon_revalidate_bos(ctx);
+ ret = radeon_cs_space_check_with_bo(rmesa->radeon.cmdbuf.cs, rmesa->radeon.dma.current, RADEON_GEM_DOMAIN_GTT, 0);
+ if (ret)
+ return GL_FALSE;
+ return GL_TRUE;
}
void r300SetTexOffset(__DRIcontext * pDRICtx, GLint texname,
MINIGLX_SOURCES = server/radeon_dri.c
+ifeq ($(RADEON_LDFLAGS),)
+CS_SOURCES = radeon_cs_space_drm.c
+endif
+
RADEON_COMMON_SOURCES = \
radeon_texture.c \
radeon_common_context.c \
C_SOURCES = \
$(COMMON_SOURCES) \
- $(DRIVER_SOURCES)
+ $(DRIVER_SOURCES) \
+ $(CS_SOURCES)
DRIVER_DEFINES = -DRADEON_COMMON=0
int (*bo_map)(struct radeon_bo *bo, int write);
int (*bo_unmap)(struct radeon_bo *bo);
int (*bo_wait)(struct radeon_bo *bo);
+ int (*bo_is_static)(struct radeon_bo *bo);
};
struct radeon_bo_manager {
return bo->bom->funcs->bo_wait(bo);
}
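+/* Report whether a BO is pinned at a fixed location (e.g. a legacy static
+ * VRAM buffer). Managers without the concept leave the hook NULL, and every
+ * BO is then treated as non-static. */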
+static inline int radeon_bo_is_static(struct radeon_bo *bo)
+{
+ if (bo->bom->funcs->bo_is_static)
+ return bo->bom->funcs->bo_is_static(bo);
+ return 0;
+}
+
#define radeon_bo_open(bom, h, s, a, d, f)\
_radeon_bo_open(bom, h, s, a, d, f, __FILE__, __FUNCTION__, __LINE__)
#define radeon_bo_ref(bo)\
return 0;
}
+
+static int bo_is_static(struct radeon_bo *bo)
+{
+ struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
+ return bo_legacy->static_bo;
+}
+
static struct radeon_bo_funcs bo_legacy_funcs = {
bo_open,
bo_ref,
bo_unref,
bo_map,
- bo_unmap
+ bo_unmap,
+    NULL, /* bo_wait: not implemented by the legacy manager */
+ bo_is_static,
};
static int bo_vram_validate(struct radeon_bo *bo,
return bo->size;
}
-int radeon_legacy_bo_is_static(struct radeon_bo *bo)
-{
- struct bo_legacy *bo_legacy = (struct bo_legacy*)bo;
- return bo_legacy->static_bo;
-}
-
COMMIT_BATCH();
}
-GLboolean radeon_revalidate_bos(GLcontext *ctx)
+static GLboolean radeon_revalidate_bos(GLcontext *ctx)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
- int flushed = 0;
int ret;
-again:
- ret = radeon_cs_space_check(radeon->cmdbuf.cs, radeon->state.bos, radeon->state.validated_bo_count);
- if (ret == RADEON_CS_SPACE_OP_TO_BIG)
+
+ ret = radeon_cs_space_check(radeon->cmdbuf.cs);
+    if (ret) /* the space check flushes internally; any nonzero result is fatal */
return GL_FALSE;
- if (ret == RADEON_CS_SPACE_FLUSH) {
- radeonFlush(ctx);
- if (flushed)
- return GL_FALSE;
- flushed = 1;
- goto again;
- }
return GL_TRUE;
}
-void radeon_validate_reset_bos(radeonContextPtr radeon)
-{
- int i;
-
- for (i = 0; i < radeon->state.validated_bo_count; i++) {
- radeon_bo_unref(radeon->state.bos[i].bo);
- radeon->state.bos[i].bo = NULL;
- radeon->state.bos[i].read_domains = 0;
- radeon->state.bos[i].write_domain = 0;
- radeon->state.bos[i].new_accounted = 0;
- }
- radeon->state.validated_bo_count = 0;
-}
-
-void radeon_validate_bo(radeonContextPtr radeon, struct radeon_bo *bo, uint32_t read_domains, uint32_t write_domain)
-{
- int i;
- for (i = 0; i < radeon->state.validated_bo_count; i++) {
- if (radeon->state.bos[i].bo == bo &&
- radeon->state.bos[i].read_domains == read_domains &&
- radeon->state.bos[i].write_domain == write_domain)
- return;
- }
- radeon_bo_ref(bo);
- radeon->state.bos[radeon->state.validated_bo_count].bo = bo;
- radeon->state.bos[radeon->state.validated_bo_count].read_domains = read_domains;
- radeon->state.bos[radeon->state.validated_bo_count].write_domain = write_domain;
- radeon->state.bos[radeon->state.validated_bo_count].new_accounted = 0;
- radeon->state.validated_bo_count++;
-
- assert(radeon->state.validated_bo_count < RADEON_MAX_BOS);
-}
-
void radeonEmitState(radeonContextPtr radeon)
{
if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
assert(rmesa->cmdbuf.cs != NULL);
rmesa->cmdbuf.size = size;
+ radeon_cs_space_set_flush(rmesa->cmdbuf.cs,
+ (void (*)(void *))radeonFlush, rmesa->glCtx);
+
if (!rmesa->radeonScreen->kernel_mm) {
radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_VRAM, rmesa->radeonScreen->texSize[0]);
radeon_cs_set_limit(rmesa->cmdbuf.cs, RADEON_GEM_DOMAIN_GTT, rmesa->radeonScreen->gartTextures.size);
struct drm_clip_rect **cliprects,
unsigned int *num_cliprects,
int *x_off, int *y_off);
-GLboolean radeon_revalidate_bos(GLcontext *ctx);
-void radeon_validate_bo(radeonContextPtr radeon, struct radeon_bo *bo, uint32_t read_domains, uint32_t write_domain);
-void radeon_validate_reset_bos(radeonContextPtr radeon);
-
void radeon_fbo_init(struct radeon_context *radeon);
void
radeon_renderbuffer_set_bo(struct radeon_renderbuffer *rb,
uint32_t new_accounted;
};
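+/* Upper bound on the persistent BO list tracked per CS; enforced by an
+ * assert in radeon_cs_space_add_persistent_bo(). */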
+#define MAX_SPACE_BOS (32)
+
struct radeon_cs_manager;
struct radeon_cs {
const char *section_file;
const char *section_func;
int section_line;
-
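+    /* persistent BO list and flush callback used by the space-check helpers */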
+ struct radeon_cs_space_check bos[MAX_SPACE_BOS];
+ int bo_count;
+ void (*space_flush_fn)(void *);
+ void *space_flush_data;
};
/* cs functions */
int (*cs_erase)(struct radeon_cs *cs);
int (*cs_need_flush)(struct radeon_cs *cs);
void (*cs_print)(struct radeon_cs *cs, FILE *file);
- int (*cs_space_check)(struct radeon_cs *cs, struct radeon_cs_space_check *bos,
- int num_bo);
};
struct radeon_cs_manager {
struct radeon_cs_funcs *funcs;
int fd;
- uint32_t vram_limit, gart_limit;
- uint32_t vram_write_used, gart_write_used;
- uint32_t read_used;
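+    /* signed: per-operation accounting deltas can go transiently negative */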
+ int32_t vram_limit, gart_limit;
+ int32_t vram_write_used, gart_write_used;
+ int32_t read_used;
};
static inline struct radeon_cs *radeon_cs_create(struct radeon_cs_manager *csm,
cs->csm->funcs->cs_print(cs, file);
}
-static inline int radeon_cs_space_check(struct radeon_cs *cs,
- struct radeon_cs_space_check *bos,
- int num_bo)
-{
- return cs->csm->funcs->cs_space_check(cs, bos, num_bo);
-}
-
static inline void radeon_cs_set_limit(struct radeon_cs *cs, uint32_t domain, uint32_t limit)
{
cs->section_cdw+=2;
}
}
+
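+/* Register the callback the space checker invokes when flushing the command
+ * buffer could reclaim space; drivers typically pass their context flush
+ * function (see the radeonFlush hookup in rcommonInitCmdBuf above). */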
+static inline void radeon_cs_space_set_flush(struct radeon_cs *cs, void (*fn)(void *), void *data)
+{
+ cs->space_flush_fn = fn;
+ cs->space_flush_data = data;
+}
+
+
+/*
+ * Add a persistent BO to the list.
+ * A persistent BO is one that will be referenced across flushes,
+ * e.g. a color buffer or a texture.
+ * The list is reset when a new "operation" begins, where an operation
+ * is a state emission (color buffer, textures, etc.) followed by a
+ * batch of vertices.
+ */
+void radeon_cs_space_add_persistent_bo(struct radeon_cs *cs,
+ struct radeon_bo *bo,
+ uint32_t read_domains,
+ uint32_t write_domain);
+
+/* reset the persistent BO list */
+void radeon_cs_space_reset_bos(struct radeon_cs *cs);
+
+/* do a space check with the current persistent BO list */
+int radeon_cs_space_check(struct radeon_cs *cs);
+
+/* do a space check with the current persistent BO list and a temporary BO
+ * a temporary BO is like a DMA buffer, which gets flushed with the
+ * command buffer */
+int radeon_cs_space_check_with_bo(struct radeon_cs *cs,
+ struct radeon_bo *bo,
+ uint32_t read_domains,
+ uint32_t write_domain);
+
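+/*
+ * Illustrative usage sketch (not part of this change; the buffer names
+ * are hypothetical):
+ *
+ *   radeon_cs_space_reset_bos(cs);
+ *   radeon_cs_space_add_persistent_bo(cs, color_bo, 0, RADEON_GEM_DOMAIN_VRAM);
+ *   radeon_cs_space_add_persistent_bo(cs, tex_bo,
+ *                                     RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0);
+ *   if (radeon_cs_space_check_with_bo(cs, dma_bo, RADEON_GEM_DOMAIN_GTT, 0))
+ *       fail; // the check may already have flushed once via the hook
+ */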
#endif
{
}
-static int cs_check_space(struct radeon_cs *cs, struct radeon_cs_space_check *bos, int num_bo)
-{
- struct radeon_cs_manager *csm = cs->csm;
- int this_op_read = 0, this_op_gart_write = 0, this_op_vram_write = 0;
- uint32_t read_domains, write_domain;
- int i;
- struct radeon_bo *bo;
-
- /* check the totals for this operation */
-
- if (num_bo == 0)
- return 0;
-
- /* prepare */
- for (i = 0; i < num_bo; i++) {
- bo = bos[i].bo;
-
- bos[i].new_accounted = 0;
- read_domains = bos[i].read_domains;
- write_domain = bos[i].write_domain;
-
- /* pinned bos don't count */
- if (radeon_legacy_bo_is_static(bo))
- continue;
-
- /* already accounted this bo */
- if (write_domain && (write_domain == bo->space_accounted)) {
- bos[i].new_accounted = bo->space_accounted;
- continue;
- }
-
- if (read_domains && ((read_domains << 16) == bo->space_accounted)) {
- bos[i].new_accounted = bo->space_accounted;
- continue;
- }
-
- if (bo->space_accounted == 0) {
- if (write_domain == RADEON_GEM_DOMAIN_VRAM)
- this_op_vram_write += bo->size;
- else if (write_domain == RADEON_GEM_DOMAIN_GTT)
- this_op_gart_write += bo->size;
- else
- this_op_read += bo->size;
- bos[i].new_accounted = (read_domains << 16) | write_domain;
- } else {
- uint16_t old_read, old_write;
-
- old_read = bo->space_accounted >> 16;
- old_write = bo->space_accounted & 0xffff;
-
- if (write_domain && (old_read & write_domain)) {
- bos[i].new_accounted = write_domain;
- /* moving from read to a write domain */
- if (write_domain == RADEON_GEM_DOMAIN_VRAM) {
- this_op_read -= bo->size;
- this_op_vram_write += bo->size;
- } else if (write_domain == RADEON_GEM_DOMAIN_VRAM) {
- this_op_read -= bo->size;
- this_op_gart_write += bo->size;
- }
- } else if (read_domains & old_write) {
- bos[i].new_accounted = bo->space_accounted & 0xffff;
- } else {
- /* rewrite the domains */
- if (write_domain != old_write)
- fprintf(stderr,"WRITE DOMAIN RELOC FAILURE 0x%x %d %d\n", bo->handle, write_domain, old_write);
- if (read_domains != old_read)
- fprintf(stderr,"READ DOMAIN RELOC FAILURE 0x%x %d %d\n", bo->handle, read_domains, old_read);
- return RADEON_CS_SPACE_FLUSH;
- }
- }
- }
-
- if (this_op_read < 0)
- this_op_read = 0;
-
- /* check sizes - operation first */
- if ((this_op_read + this_op_gart_write > csm->gart_limit) ||
- (this_op_vram_write > csm->vram_limit)) {
- return RADEON_CS_SPACE_OP_TO_BIG;
- }
-
- if (((csm->vram_write_used + this_op_vram_write) > csm->vram_limit) ||
- ((csm->read_used + csm->gart_write_used + this_op_gart_write + this_op_read) > csm->gart_limit)) {
- return RADEON_CS_SPACE_FLUSH;
- }
-
- csm->gart_write_used += this_op_gart_write;
- csm->vram_write_used += this_op_vram_write;
- csm->read_used += this_op_read;
- /* commit */
- for (i = 0; i < num_bo; i++) {
- bo = bos[i].bo;
- bo->space_accounted = bos[i].new_accounted;
- }
-
- return RADEON_CS_SPACE_OK;
-}
-
static struct radeon_cs_funcs radeon_cs_legacy_funcs = {
cs_create,
cs_write_reloc,
cs_erase,
cs_need_flush,
cs_print,
- cs_check_space
};
struct radeon_cs_manager *radeon_cs_manager_legacy_ctor(struct radeon_context *ctx)
--- /dev/null
+/*
+ * Copyright © 2009 Red Hat Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
+ * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ */
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "radeon_bocs_wrapper.h"
+
+struct rad_sizes {
+ int32_t op_read;
+ int32_t op_gart_write;
+ int32_t op_vram_write;
+};
+
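+/*
+ * bo->space_accounted packs the domains a BO has already been counted
+ * under: read domains in the upper 16 bits, the write domain in the
+ * lower 16. Zero means the BO is not yet accounted in this batch.
+ */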
+static inline int radeon_cs_setup_bo(struct radeon_cs_space_check *sc, struct rad_sizes *sizes)
+{
+ uint32_t read_domains, write_domain;
+ struct radeon_bo *bo;
+
+ bo = sc->bo;
+ sc->new_accounted = 0;
+ read_domains = sc->read_domains;
+ write_domain = sc->write_domain;
+
+ /* legacy needs a static check */
+ if (radeon_bo_is_static(bo)) {
+ bo->space_accounted = sc->new_accounted = (read_domains << 16) | write_domain;
+ return 0;
+ }
+
+ /* already accounted this bo */
+ if (write_domain && (write_domain == bo->space_accounted)) {
+ sc->new_accounted = bo->space_accounted;
+ return 0;
+ }
+ if (read_domains && ((read_domains << 16) == bo->space_accounted)) {
+ sc->new_accounted = bo->space_accounted;
+ return 0;
+ }
+
+ if (bo->space_accounted == 0) {
+ if (write_domain == RADEON_GEM_DOMAIN_VRAM)
+ sizes->op_vram_write += bo->size;
+ else if (write_domain == RADEON_GEM_DOMAIN_GTT)
+ sizes->op_gart_write += bo->size;
+ else
+ sizes->op_read += bo->size;
+ sc->new_accounted = (read_domains << 16) | write_domain;
+ } else {
+ uint16_t old_read, old_write;
+
+ old_read = bo->space_accounted >> 16;
+ old_write = bo->space_accounted & 0xffff;
+
+ if (write_domain && (old_read & write_domain)) {
+ sc->new_accounted = write_domain;
+ /* moving from read to a write domain */
+ if (write_domain == RADEON_GEM_DOMAIN_VRAM) {
+ sizes->op_read -= bo->size;
+ sizes->op_vram_write += bo->size;
+            } else if (write_domain == RADEON_GEM_DOMAIN_GTT) {
+ sizes->op_read -= bo->size;
+ sizes->op_gart_write += bo->size;
+ }
+ } else if (read_domains & old_write) {
+ sc->new_accounted = bo->space_accounted & 0xffff;
+ } else {
+ /* rewrite the domains */
+ if (write_domain != old_write)
+ fprintf(stderr,"WRITE DOMAIN RELOC FAILURE 0x%x %d %d\n", bo->handle, write_domain, old_write);
+ if (read_domains != old_read)
+ fprintf(stderr,"READ DOMAIN RELOC FAILURE 0x%x %d %d\n", bo->handle, read_domains, old_read);
+ return RADEON_CS_SPACE_FLUSH;
+ }
+ }
+ return 0;
+}
+
+static int radeon_cs_do_space_check(struct radeon_cs *cs, struct radeon_cs_space_check *new_tmp)
+{
+ struct radeon_cs_manager *csm = cs->csm;
+ int i;
+ struct radeon_bo *bo;
+ struct rad_sizes sizes;
+ int ret;
+
+ /* check the totals for this operation */
+
+ if (cs->bo_count == 0 && !new_tmp)
+ return 0;
+
+ memset(&sizes, 0, sizeof(struct rad_sizes));
+
+ /* prepare */
+ for (i = 0; i < cs->bo_count; i++) {
+ ret = radeon_cs_setup_bo(&cs->bos[i], &sizes);
+ if (ret)
+ return ret;
+ }
+
+ if (new_tmp) {
+ ret = radeon_cs_setup_bo(new_tmp, &sizes);
+ if (ret)
+ return ret;
+ }
+
+ if (sizes.op_read < 0)
+ sizes.op_read = 0;
+
+ /* check sizes - operation first */
+ if ((sizes.op_read + sizes.op_gart_write > csm->gart_limit) ||
+ (sizes.op_vram_write > csm->vram_limit)) {
+ return RADEON_CS_SPACE_OP_TO_BIG;
+ }
+
+ if (((csm->vram_write_used + sizes.op_vram_write) > csm->vram_limit) ||
+ ((csm->read_used + csm->gart_write_used + sizes.op_gart_write + sizes.op_read) > csm->gart_limit)) {
+ return RADEON_CS_SPACE_FLUSH;
+ }
+
+ csm->gart_write_used += sizes.op_gart_write;
+ csm->vram_write_used += sizes.op_vram_write;
+ csm->read_used += sizes.op_read;
+ /* commit */
+ for (i = 0; i < cs->bo_count; i++) {
+ bo = cs->bos[i].bo;
+ bo->space_accounted = cs->bos[i].new_accounted;
+ }
+ if (new_tmp)
+ new_tmp->bo->space_accounted = new_tmp->new_accounted;
+
+ return RADEON_CS_SPACE_OK;
+}
+
+void radeon_cs_space_add_persistent_bo(struct radeon_cs *cs, struct radeon_bo *bo, uint32_t read_domains, uint32_t write_domain)
+{
+ int i;
+ for (i = 0; i < cs->bo_count; i++) {
+ if (cs->bos[i].bo == bo &&
+ cs->bos[i].read_domains == read_domains &&
+ cs->bos[i].write_domain == write_domain)
+ return;
+ }
+ radeon_bo_ref(bo);
+ i = cs->bo_count;
+ cs->bos[i].bo = bo;
+ cs->bos[i].read_domains = read_domains;
+ cs->bos[i].write_domain = write_domain;
+ cs->bos[i].new_accounted = 0;
+ cs->bo_count++;
+
+ assert(cs->bo_count < MAX_SPACE_BOS);
+}
+
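+/* Run the accounting pass; if it reports that a flush would free space,
+ * invoke the registered flush hook once and retry. A second failure, or
+ * an operation too big to ever fit, is fatal (-1). */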
+static int radeon_cs_check_space_internal(struct radeon_cs *cs, struct radeon_cs_space_check *tmp_bo)
+{
+ int ret;
+ int flushed = 0;
+
+again:
+ ret = radeon_cs_do_space_check(cs, tmp_bo);
+ if (ret == RADEON_CS_SPACE_OP_TO_BIG)
+ return -1;
+ if (ret == RADEON_CS_SPACE_FLUSH) {
+        if (cs->space_flush_fn)
+            (*cs->space_flush_fn)(cs->space_flush_data);
+ if (flushed)
+ return -1;
+ flushed = 1;
+ goto again;
+ }
+ return 0;
+}
+
+int radeon_cs_space_check_with_bo(struct radeon_cs *cs,
+ struct radeon_bo *bo,
+ uint32_t read_domains, uint32_t write_domain)
+{
+ struct radeon_cs_space_check temp_bo;
+ int ret = 0;
+
+ if (bo) {
+ temp_bo.bo = bo;
+ temp_bo.read_domains = read_domains;
+ temp_bo.write_domain = write_domain;
+ temp_bo.new_accounted = 0;
+ }
+
+ ret = radeon_cs_check_space_internal(cs, bo ? &temp_bo : NULL);
+ return ret;
+}
+
+int radeon_cs_space_check(struct radeon_cs *cs)
+{
+ return radeon_cs_check_space_internal(cs, NULL);
+}
+
+void radeon_cs_space_reset_bos(struct radeon_cs *cs)
+{
+ int i;
+ for (i = 0; i < cs->bo_count; i++) {
+ radeon_bo_unref(cs->bos[i].bo);
+ cs->bos[i].bo = NULL;
+ cs->bos[i].read_domains = 0;
+ cs->bos[i].write_domain = 0;
+ cs->bos[i].new_accounted = 0;
+ }
+ cs->bo_count = 0;
+}
+
+
rmesa->dma.current_used = 0;
rmesa->dma.current_vertexptr = 0;
- radeon_validate_bo(rmesa, rmesa->dma.current, RADEON_GEM_DOMAIN_GTT, 0);
-
- if (radeon_revalidate_bos(rmesa->glCtx) == GL_FALSE)
- fprintf(stderr,"failure to revalidate BOs - badness\n");
+ if (radeon_cs_space_check_with_bo(rmesa->cmdbuf.cs,
+ rmesa->dma.current,
+ RADEON_GEM_DOMAIN_GTT, 0))
+ fprintf(stderr,"failure to revalidate BOs - badness\n");
if (!rmesa->dma.current) {
/* Cmd buf has been flushed in radeon_revalidate_bos */
{
r100ContextPtr rmesa = R100_CONTEXT(ctx);
struct radeon_renderbuffer *rrb;
- int i;
+ int i, ret;
- radeon_validate_reset_bos(&rmesa->radeon);
+ radeon_cs_space_reset_bos(rmesa->radeon.cmdbuf.cs);
rrb = radeon_get_colorbuffer(&rmesa->radeon);
/* color buffer */
if (rrb && rrb->bo) {
- radeon_validate_bo(&rmesa->radeon, rrb->bo,
- 0, RADEON_GEM_DOMAIN_VRAM);
+ radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs, rrb->bo,
+ 0, RADEON_GEM_DOMAIN_VRAM);
}
/* depth buffer */
rrb = radeon_get_depthbuffer(&rmesa->radeon);
if (rrb && rrb->bo) {
- radeon_validate_bo(&rmesa->radeon, rrb->bo,
- 0, RADEON_GEM_DOMAIN_VRAM);
+ radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs, rrb->bo,
+ 0, RADEON_GEM_DOMAIN_VRAM);
}
for (i = 0; i < ctx->Const.MaxTextureImageUnits; ++i) {
t = radeon_tex_obj(ctx->Texture.Unit[i]._Current);
if (t->image_override && t->bo)
- radeon_validate_bo(&rmesa->radeon, t->bo,
+ radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs, t->bo,
RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0);
else if (t->mt->bo)
- radeon_validate_bo(&rmesa->radeon, t->mt->bo,
+ radeon_cs_space_add_persistent_bo(rmesa->radeon.cmdbuf.cs, t->mt->bo,
RADEON_GEM_DOMAIN_GTT | RADEON_GEM_DOMAIN_VRAM, 0);
}
- if (rmesa->radeon.dma.current)
- radeon_validate_bo(&rmesa->radeon, rmesa->radeon.dma.current,
- RADEON_GEM_DOMAIN_GTT, 0);
-
- return radeon_revalidate_bos(ctx);
+ ret = radeon_cs_space_check_with_bo(rmesa->radeon.cmdbuf.cs, rmesa->radeon.dma.current, RADEON_GEM_DOMAIN_GTT, 0);
+ if (ret)
+ return GL_FALSE;
+ return GL_TRUE;
}
GLboolean radeonValidateState( GLcontext *ctx )