endif
RADEON_COMMON_SOURCES = \
- radeon_texture.c \
+ radeon_bo_legacy.c \
radeon_common_context.c \
radeon_common.c \
+ radeon_cs_legacy.c \
radeon_dma.c \
+ radeon_debug.c \
+ radeon_fbo.c \
radeon_lock.c \
- radeon_bo_legacy.c \
- radeon_cs_legacy.c \
radeon_mipmap_tree.c \
+ radeon_queryobj.c \
radeon_span.c \
- radeon_fbo.c \
- radeon_queryobj.c
+ radeon_texture.c
DRIVER_SOURCES = \
radeon_context.c \
&tex,
sizeof(drm_radeon_texture_t));
if (ret) {
- if (RADEON_DEBUG & DEBUG_IOCTL)
+ if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "DRM_RADEON_TEXTURE: again!\n");
usleep(1);
}
&tex,
sizeof(drm_radeon_texture_t));
if (ret) {
- if (RADEON_DEBUG & DEBUG_IOCTL)
+ if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "DRM_RADEON_TEXTURE: again!\n");
usleep(1);
}
* 1 most output
* 2 also print state values
*/
-#define DEBUG_CMDBUF 0
+#define RADEON_CMDBUF 0
/* =============================================================
* Scissoring
rfb = dPriv->driverPrivate;
- if ( RADEON_DEBUG & DEBUG_IOCTL ) {
+ if ( RADEON_DEBUG & RADEON_IOCTL ) {
fprintf( stderr, "\n%s( %p )\n\n", __FUNCTION__, (void *) rmesa->glCtx );
}
LOCK_HARDWARE(radeon);
- if ( RADEON_DEBUG & DEBUG_IOCTL ) {
+ if ( RADEON_DEBUG & RADEON_IOCTL ) {
fprintf(stderr, "%s: pfCurrentPage: %d %d\n", __FUNCTION__,
radeon->sarea->pfCurrentPage, radeon->sarea->pfState);
}
*/
void radeonDrawBuffer( GLcontext *ctx, GLenum mode )
{
- if (RADEON_DEBUG & DEBUG_DRI)
+ if (RADEON_DEBUG & RADEON_DRI)
fprintf(stderr, "%s %s\n", __FUNCTION__,
_mesa_lookup_enum_by_nr( mode ));
fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);
- if (DEBUG_CMDBUF > 1 && RADEON_DEBUG & DEBUG_VERBOSE) {
+ if (radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
if (dwords > state->cmd_size)
dwords = state->cmd_size;
int i, j, reg, count;
int dwords;
uint32_t packet0;
- if (! (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) )
+ if (!radeon_is_debug_enabled(RADEON_STATE, RADEON_VERBOSE) )
return;
if (!radeon->radeonScreen->kernel_mm) {
fprintf(stderr, " emit %s %d/%d\n", state->name, dwords, state->cmd_size);
- if (DEBUG_CMDBUF > 1 && RADEON_DEBUG & DEBUG_VERBOSE) {
+ if (radeon_is_debug_enabled(RADEON_STATE, RADEON_TRACE)) {
if (dwords > state->cmd_size)
dwords = state->cmd_size;
for (i = 0; i < dwords;) {
struct radeon_state_atom *atom;
GLuint dwords = 0;
/* check if we are going to emit full state */
- if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_VERBOSE)
- fprintf(stderr, "%s\n", __func__);
if (radeon->cmdbuf.cs->cdw && !radeon->hw.all_dirty) {
if (!radeon->hw.is_dirty)
- return dwords;
+ goto out;
foreach(atom, &radeon->hw.atomlist) {
if (atom->dirty) {
const GLuint atom_size = atom->check(radeon->glCtx, atom);
dwords += atom_size;
- if (DEBUG_CMDBUF && atom_size) {
+ if (RADEON_CMDBUF && atom_size) {
radeon_print_state_atom(radeon, atom);
}
}
foreach(atom, &radeon->hw.atomlist) {
const GLuint atom_size = atom->check(radeon->glCtx, atom);
dwords += atom_size;
- if (DEBUG_CMDBUF && atom_size) {
+ if (RADEON_CMDBUF && atom_size) {
radeon_print_state_atom(radeon, atom);
}
}
}
+out:
+ radeon_print(RADEON_STATE, RADEON_NORMAL, "%s %u\n", __func__, dwords);
return dwords;
}
END_BATCH();
}
} else {
- if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_STATE) {
- fprintf(stderr, " skip state %s\n",
- atom->name);
- }
+ radeon_print(RADEON_STATE, RADEON_VERBOSE, " skip state %s\n", atom->name);
}
atom->dirty = GL_FALSE;
void radeonEmitState(radeonContextPtr radeon)
{
- if (RADEON_DEBUG & (DEBUG_STATE|DEBUG_PRIMS))
- fprintf(stderr, "%s\n", __FUNCTION__);
+ radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __FUNCTION__);
if (radeon->vtbl.pre_emit_state)
radeon->vtbl.pre_emit_state(radeon);
return;
if (!radeon->cmdbuf.cs->cdw) {
- if (RADEON_DEBUG & DEBUG_STATE)
+ if (RADEON_DEBUG & RADEON_STATE)
fprintf(stderr, "Begin reemit state\n");
radeonEmitAtoms(radeon, GL_TRUE);
} else {
- if (RADEON_DEBUG & DEBUG_STATE)
+ if (RADEON_DEBUG & RADEON_STATE)
fprintf(stderr, "Begin dirty state\n");
radeonEmitAtoms(radeon, GL_FALSE);
void radeonFlush(GLcontext *ctx)
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
- if (RADEON_DEBUG & DEBUG_IOCTL)
+ if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);
/* okay if we have no cmds in the buffer &&
}
rmesa->cmdbuf.flushing = 1;
- if (RADEON_DEBUG & DEBUG_IOCTL) {
+ if (RADEON_DEBUG & RADEON_IOCTL) {
fprintf(stderr, "%s from %s - %i cliprects\n",
__FUNCTION__, caller, rmesa->numClipRects);
}
if (size > 64 * 256)
size = 64 * 256;
- if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA)) {
- fprintf(stderr, "sizeof(drm_r300_cmd_header_t)=%zd\n",
- sizeof(drm_r300_cmd_header_t));
- fprintf(stderr, "sizeof(drm_radeon_cmd_buffer_t)=%zd\n",
- sizeof(drm_radeon_cmd_buffer_t));
- fprintf(stderr,
+ radeon_print(RADEON_CS, RADEON_VERBOSE,
+ "sizeof(drm_r300_cmd_header_t)=%zd\n", sizeof(drm_r300_cmd_header_t));
+ radeon_print(RADEON_CS, RADEON_VERBOSE,
+ "sizeof(drm_radeon_cmd_buffer_t)=%zd\n", sizeof(drm_radeon_cmd_buffer_t));
+ radeon_print(RADEON_CS, RADEON_VERBOSE,
"Allocating %d bytes command buffer (max state is %d bytes)\n",
size * 4, rmesa->hw.max_state_size * 4);
- }
if (rmesa->radeonScreen->kernel_mm) {
int fd = rmesa->radeonScreen->driScreen->fd;
int line)
{
if (!rmesa->cmdbuf.cs->cdw && dostate) {
- if (RADEON_DEBUG & DEBUG_IOCTL)
- fprintf(stderr, "Reemit state after flush (from %s)\n", function);
+ radeon_print(RADEON_STATE, RADEON_NORMAL,
+ "Reemit state after flush (from %s)\n", function);
radeonEmitState(rmesa);
}
radeon_cs_begin(rmesa->cmdbuf.cs, n, file, function, line);
- if (DEBUG_CMDBUF && RADEON_DEBUG & DEBUG_IOCTL)
- fprintf(stderr, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
+ radeon_print(RADEON_CS, RADEON_VERBOSE, "BEGIN_BATCH(%d) at %d, from %s:%i\n",
n, rmesa->cmdbuf.cs->cdw, function, line);
}
{
radeonContextPtr radeon = (radeonContextPtr) driContextPriv->driverPrivate;
- if (RADEON_DEBUG & DEBUG_DRI)
+ if (RADEON_DEBUG & RADEON_DRI)
fprintf(stderr, "%s ctx %p\n", __FUNCTION__,
radeon->glCtx);
char *regname;
struct radeon_bo *depth_bo = NULL, *bo;
- if (RADEON_DEBUG & DEBUG_DRI)
+ if (RADEON_DEBUG & RADEON_DRI)
fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
draw = drawable->driverPrivate;
continue;
}
- if (RADEON_DEBUG & DEBUG_DRI)
+ if (RADEON_DEBUG & RADEON_DRI)
fprintf(stderr,
"attaching buffer %s, %d, at %d, cpp %d, pitch %d\n",
regname, buffers[i].name, buffers[i].attachment,
rb->has_surface = 0;
if (buffers[i].attachment == __DRI_BUFFER_STENCIL && depth_bo) {
- if (RADEON_DEBUG & DEBUG_DRI)
+ if (RADEON_DEBUG & RADEON_DRI)
fprintf(stderr, "(reusing depth buffer as stencil)\n");
bo = depth_bo;
radeon_bo_ref(bo);
struct gl_framebuffer *readfb;
if (!driContextPriv) {
- if (RADEON_DEBUG & DEBUG_DRI)
+ if (RADEON_DEBUG & RADEON_DRI)
fprintf(stderr, "%s ctx is null\n", __FUNCTION__);
_mesa_make_current(NULL, NULL, NULL);
return GL_TRUE;
radeon_make_renderbuffer_current(radeon, drfb);
}
- if (RADEON_DEBUG & DEBUG_DRI)
+ if (RADEON_DEBUG & RADEON_DRI)
fprintf(stderr, "%s ctx %p dfb %p rfb %p\n", __FUNCTION__, radeon->glCtx, drfb, readfb);
driUpdateFramebufferSize(radeon->glCtx, driDrawPriv);
}
- if (RADEON_DEBUG & DEBUG_DRI)
+ if (RADEON_DEBUG & RADEON_DRI)
fprintf(stderr, "End %s\n", __FUNCTION__);
return GL_TRUE;
#include "tnl/t_context.h"
#include "main/colormac.h"
+#include "radeon_debug.h"
#include "radeon_screen.h"
#include "radeon_drm.h"
#include "dri_util.h"
#include "radeon_bocs_wrapper.h"
-/* From http://gcc. gnu.org/onlinedocs/gcc-3.2.3/gcc/Variadic-Macros.html .
- I suppose we could inline this and use macro to fetch out __LINE__ and stuff in case we run into trouble
- with other compilers ... GLUE!
-*/
-#define WARN_ONCE(a, ...) { \
- static int warn##__LINE__=1; \
- if(warn##__LINE__){ \
- fprintf(stderr, "*********************************WARN_ONCE*********************************\n"); \
- fprintf(stderr, "File %s function %s line %d\n", \
- __FILE__, __FUNCTION__, __LINE__); \
- fprintf(stderr, a, ## __VA_ARGS__);\
- fprintf(stderr, "***************************************************************************\n"); \
- warn##__LINE__=0;\
- } \
- }
-
/* This union is used to avoid warnings/miscompilation
with float to uint32_t casts due to strict-aliasing */
typedef union { GLfloat f; uint32_t ui32; } float_ui32_type;
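Side note on the union above: it lets float bits be reinterpreted as a dword without a pointer cast such as *(uint32_t *)&f, which violates strict aliasing. A minimal usage sketch follows; the helper name is hypothetical and not part of this patch.

static inline uint32_t radeonFloatAsUint32(GLfloat f)
{
	/* Write through one union member, read through the other;
	 * a union is the sanctioned way to type-pun here in C. */
	float_ui32_type u;
	u.f = f;
	return u.ui32;
}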
int drmMinor;
};
-#define DEBUG_TEXTURE 0x001
-#define DEBUG_STATE 0x002
-#define DEBUG_IOCTL 0x004
-#define DEBUG_PRIMS 0x008
-#define DEBUG_VERTS 0x010
-#define DEBUG_FALLBACKS 0x020
-#define DEBUG_VFMT 0x040
-#define DEBUG_CODEGEN 0x080
-#define DEBUG_VERBOSE 0x100
-#define DEBUG_DRI 0x200
-#define DEBUG_DMA 0x400
-#define DEBUG_SANITY 0x800
-#define DEBUG_SYNC 0x1000
-#define DEBUG_PIXEL 0x2000
-#define DEBUG_MEMORY 0x4000
-
-
typedef void (*radeon_tri_func) (radeonContextPtr,
radeonVertex *,
radeonVertex *, radeonVertex *);
__DRIdrawablePrivate * driReadPriv);
extern void radeonDestroyContext(__DRIcontextPrivate * driContextPriv);
-/* ================================================================
- * Debugging:
- */
-#define DO_DEBUG 1
-
-#if DO_DEBUG
-extern int RADEON_DEBUG;
-#else
-#define RADEON_DEBUG 0
-#endif
-
#endif
NULL,
};
-static const struct dri_debug_control debug_control[] =
-{
- { "fall", DEBUG_FALLBACKS },
- { "tex", DEBUG_TEXTURE },
- { "ioctl", DEBUG_IOCTL },
- { "prim", DEBUG_PRIMS },
- { "vert", DEBUG_VERTS },
- { "state", DEBUG_STATE },
- { "code", DEBUG_CODEGEN },
- { "vfmt", DEBUG_VFMT },
- { "vtxf", DEBUG_VFMT },
- { "verb", DEBUG_VERBOSE },
- { "dri", DEBUG_DRI },
- { "dma", DEBUG_DMA },
- { "san", DEBUG_SANITY },
- { "sync", DEBUG_SYNC },
- { NULL, 0 }
-};
-
static void r100_get_lock(radeonContextPtr radeon)
{
r100ContextPtr rmesa = (r100ContextPtr)radeon;
{"fall", RADEON_FALLBACKS},
{"tex", RADEON_TEXTURE},
{"ioctl", RADEON_IOCTL},
+ {"verts", RADEON_RENDER},
{"render", RADEON_RENDER},
{"swrender", RADEON_SWRENDER},
{"state", RADEON_STATE},
#endif
typedef enum radeon_debug_types {
- RADEON_TEXTURE = 0x0001,
- RADEON_STATE = 0x0002,
- RADEON_IOCTL = 0x0004,
- RADEON_RENDER = 0x0008,
- RADEON_SWRENDER = 0x0010,
- RADEON_FALLBACKS = 0x0020,
- RADEON_VFMT = 0x0040,
- RADEON_SHADER = 0x0080,
- RADEON_CS = 0x0100,
- RADEON_DRI = 0x0200,
- RADEON_DMA = 0x0400,
- RADEON_SANITY = 0x0800,
- RADEON_SYNC = 0x1000,
- RADEON_PIXEL = 0x2000,
- RADEON_MEMORY = 0x4000,
- RADEON_GENERAL = 0x8000 /* Used for errors and warnings */
+ RADEON_TEXTURE = 0x00001,
+ RADEON_STATE = 0x00002,
+ RADEON_IOCTL = 0x00004,
+ RADEON_RENDER = 0x00008,
+ RADEON_SWRENDER = 0x00010,
+ RADEON_FALLBACKS = 0x00020,
+ RADEON_VFMT = 0x00040,
+ RADEON_SHADER = 0x00080,
+ RADEON_CS = 0x00100,
+ RADEON_DRI = 0x00200,
+ RADEON_DMA = 0x00400,
+ RADEON_SANITY = 0x00800,
+ RADEON_SYNC = 0x01000,
+ RADEON_PIXEL = 0x02000,
+ RADEON_MEMORY = 0x04000,
+ RADEON_VERTS = 0x08000,
+ RADEON_GENERAL = 0x10000 /* Used for errors and warnings */
} radeon_debug_type_t;
extern radeon_debug_type_t radeon_enabled_debug_types;
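For orientation: throughout the hunks above these type bits are paired with a severity level (RADEON_NORMAL, RADEON_VERBOSE, RADEON_TRACE) in radeon_print() and radeon_is_debug_enabled(). A rough sketch of how such helpers can sit on top of this enum is shown below; the level numbering, the RADEON_DEBUG_LEVEL cap and the vfprintf-based body are assumptions for illustration, not the patch's actual radeon_debug.h.

#include <stdarg.h>
#include <stdio.h>

/* Severity levels paired with the type bits above (numbering assumed). */
typedef enum radeon_debug_levels {
	RADEON_CRITICAL = 0,	/* errors only */
	RADEON_NORMAL   = 1,	/* default verbosity */
	RADEON_VERBOSE  = 2,
	RADEON_TRACE    = 3	/* most detailed output */
} radeon_debug_level_t;

/* Assumed compile-time verbosity cap. */
#define RADEON_DEBUG_LEVEL RADEON_VERBOSE

static inline int radeon_is_debug_enabled(radeon_debug_type_t type,
					  radeon_debug_level_t level)
{
	/* A message is emitted when its type bit is enabled and its
	 * level does not exceed the configured cap. */
	return level <= RADEON_DEBUG_LEVEL &&
		(type & radeon_enabled_debug_types);
}

static inline void radeon_print(radeon_debug_type_t type,
				radeon_debug_level_t level,
				const char *message, ...)
{
	if (radeon_is_debug_enabled(type, level)) {
		va_list args;
		va_start(args, message);
		vfprintf(stderr, message, args);
		va_end(args);
	}
}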
{
int i;
- if (RADEON_DEBUG & DEBUG_VERTS)
+ if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s count %d stride %d out %p data %p\n",
__FUNCTION__, count, stride, (void *)out, (void *)data);
{
int i;
- if (RADEON_DEBUG & DEBUG_VERTS)
+ if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s count %d stride %d out %p data %p\n",
__FUNCTION__, count, stride, (void *)out, (void *)data);
{
int i;
- if (RADEON_DEBUG & DEBUG_VERTS)
+ if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s count %d stride %d out %p data %p\n",
__FUNCTION__, count, stride, (void *)out, (void *)data);
{
int i;
- if (RADEON_DEBUG & DEBUG_VERTS)
+ if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s count %d stride %d out %p data %p\n",
__FUNCTION__, count, stride, (void *)out, (void *)data);
if (size > rmesa->dma.minimum_size)
rmesa->dma.minimum_size = (size + 15) & (~15);
- if (RADEON_DEBUG & (DEBUG_IOCTL | DEBUG_DMA))
- fprintf(stderr, "%s\n", __FUNCTION__);
+ radeon_print(RADEON_DMA, RADEON_NORMAL, "%s size %d minimum_size %d\n",
+ __FUNCTION__, size, rmesa->dma.minimum_size);
/* unmap old reserved bo */
struct radeon_bo **pbo, int *poffset,
int bytes, int alignment)
{
- if (RADEON_DEBUG & DEBUG_IOCTL)
+ if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s %d\n", __FUNCTION__, bytes);
if (rmesa->dma.flush)
{
struct radeon_dma_bo *dma_bo = CALLOC_STRUCT(radeon_dma_bo);
struct radeon_dma_bo *temp;
- if (RADEON_DEBUG & DEBUG_DMA)
+ if (RADEON_DEBUG & RADEON_DMA)
fprintf(stderr, "%s\n", __FUNCTION__);
foreach_s(dma_bo, temp, &rmesa->dma.free) {
if (is_empty_list(&rmesa->dma.reserved))
return;
- if (RADEON_DEBUG & DEBUG_IOCTL)
+ if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s %d\n", __FUNCTION__, return_bytes);
rmesa->dma.current_used -= return_bytes;
rmesa->dma.current_vertexptr = rmesa->dma.current_used;
const int expire_at = ++rmesa->dma.free.expire_counter + DMA_BO_FREE_TIME;
const int time = rmesa->dma.free.expire_counter;
- if (RADEON_DEBUG & DEBUG_DMA) {
+ if (RADEON_DEBUG & RADEON_DMA) {
size_t free = 0,
wait = 0,
reserved = 0;
struct radeon_dma *dma = &rmesa->dma;
- if (RADEON_DEBUG & DEBUG_IOCTL)
+ if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s\n", __FUNCTION__);
dma->flush = NULL;
{
GLuint bytes = vsize * nverts;
void *head;
- if (RADEON_DEBUG & DEBUG_IOCTL)
+ if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s\n", __FUNCTION__);
if(is_empty_list(&rmesa->dma.reserved)
||rmesa->dma.current_vertexptr + bytes > first_elem(&rmesa->dma.reserved)->bo->size) {
{
radeonContextPtr radeon = RADEON_CONTEXT( ctx );
int i;
- if (RADEON_DEBUG & DEBUG_IOCTL)
+ if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s\n", __FUNCTION__);
if (radeon->dma.flush) {
#include "radeon_common.h"
#include "radeon_mipmap_tree.h"
-#define FILE_DEBUG_FLAG DEBUG_TEXTURE
+#define FILE_DEBUG_FLAG RADEON_TEXTURE
#define DBG(...) do { \
if (RADEON_DEBUG & FILE_DEBUG_FLAG) \
_mesa_printf(__VA_ARGS__); \
uint32_t *cmd = (uint32_t *)(rmesa->radeon.cmdbuf.cs->packets + rmesa->tcl.elt_cmd_start);
int dwords = (rmesa->radeon.cmdbuf.cs->section_ndw - rmesa->radeon.cmdbuf.cs->section_cdw);
- if (RADEON_DEBUG & DEBUG_IOCTL)
+ if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s\n", __FUNCTION__);
assert( rmesa->radeon.dma.flush == radeonFlushElts );
END_BATCH();
- if (RADEON_DEBUG & DEBUG_SYNC) {
+ if (RADEON_DEBUG & RADEON_SYNC) {
fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
radeonFinish( rmesa->radeon.glCtx );
}
int align_min_nr;
BATCH_LOCALS(&rmesa->radeon);
- if (RADEON_DEBUG & DEBUG_IOCTL)
+ if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s %d prim %x\n", __FUNCTION__, min_nr, primitive);
assert((primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));
retval = (GLushort *)(rmesa->radeon.cmdbuf.cs->packets + rmesa->tcl.elt_cmd_offset);
- if (RADEON_DEBUG & DEBUG_PRIMS)
+ if (RADEON_DEBUG & RADEON_RENDER)
fprintf(stderr, "%s: header prim %x \n",
__FUNCTION__, primitive);
#else
BATCH_LOCALS(&rmesa->radeon);
- if (RADEON_DEBUG & (DEBUG_PRIMS|DEBUG_IOCTL))
+ if (RADEON_DEBUG & (RADEON_RENDER|RADEON_IOCTL))
fprintf(stderr, "%s: vertex_size 0x%x offset 0x%x \n",
__FUNCTION__, vertex_size, offset);
int sz = 1 + (nr >> 1) * 3 + (nr & 1) * 2;
int i;
- if (RADEON_DEBUG & DEBUG_IOCTL)
+ if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s\n", __FUNCTION__);
BEGIN_BATCH(sz+2+(nr * 2));
GLuint color_mask = 0;
GLuint orig_mask = mask;
- if ( RADEON_DEBUG & DEBUG_IOCTL ) {
+ if ( RADEON_DEBUG & RADEON_IOCTL ) {
fprintf( stderr, "radeonClear\n");
}
}
if ( mask ) {
- if (RADEON_DEBUG & DEBUG_FALLBACKS)
+ if (RADEON_DEBUG & RADEON_FALLBACKS)
fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, mask);
_swrast_Clear( ctx, mask );
}
if (ctx->Light.ColorMaterialEnabled)
mask &= ~ctx->Light.ColorMaterialBitmask;
- if (RADEON_DEBUG & DEBUG_STATE)
+ if (RADEON_DEBUG & RADEON_STATE)
fprintf(stderr, "%s\n", __FUNCTION__);
check_twoside_fallback( ctx );
update_global_ambient( ctx );
}
- else if (RADEON_DEBUG & (DEBUG_PRIMS|DEBUG_STATE))
+ else if (RADEON_DEBUG & (RADEON_RENDER|RADEON_STATE))
fprintf(stderr, "%s: Elided noop material call\n", __FUNCTION__);
}
GLboolean tmp;
RADEON_STATECHANGE( rmesa, tcl );
- if (RADEON_DEBUG & DEBUG_STATE)
+ if (RADEON_DEBUG & RADEON_STATE)
fprintf(stderr, "%s %d\n", __FUNCTION__, ctx->_NeedEyeCoords);
if (ctx->_NeedEyeCoords)
if (ATOMIC_INC_AND_FETCH(radeon->dri.hwLockCount) > 1)
{
#ifndef NDEBUG
- if ( RADEON_DEBUG & DEBUG_SANITY )
+ if ( RADEON_DEBUG & RADEON_SANITY )
fprintf(stderr, "*** %d times of recursive call to %s ***\n"
"Original call was from %s (file: %s line: %d)\n"
"Now call is coming from %s (file: %s line: %d)\n"
int size = 1;
radeonContextPtr rmesa = RADEON_CONTEXT(ctx);
- if (RADEON_DEBUG & DEBUG_VERTS)
+ if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s count %d stride %d\n",
__FUNCTION__, count, stride);
static void emit_s0_vec(uint32_t *out, GLvoid *data, int stride, int count)
{
int i;
- if (RADEON_DEBUG & DEBUG_VERTS)
+ if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s count %d stride %d\n",
__FUNCTION__, count, stride);
{
int i;
- if (RADEON_DEBUG & DEBUG_VERTS)
+ if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s count %d stride %d\n",
__FUNCTION__, count, stride);
int emitsize;
uint32_t *out;
- if (RADEON_DEBUG & DEBUG_VERTS)
+ if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s %d/%d\n", __FUNCTION__, count, size);
switch (size) {
GLuint vtx, unit;
#if 0
- if (RADEON_DEBUG & DEBUG_VERTS)
+ if (RADEON_DEBUG & RADEON_VERTS)
_tnl_print_vert_flags( __FUNCTION__, inputs );
#endif
union emit_union *v = (union emit_union *)dest;
- if (RADEON_DEBUG & DEBUG_VERTS)
- fprintf(stderr, "%s\n", __FUNCTION__);
+ radeon_print(RADEON_SWRENDER, RADEON_VERBOSE, "%s\n", __FUNCTION__);
coord = (GLuint (*)[4])VB->ObjPtr->data;
coord_stride = VB->ObjPtr->stride;
lvl->faces[face].offset = *curOffset;
*curOffset += lvl->size;
- if (RADEON_DEBUG & DEBUG_TEXTURE)
+ if (RADEON_DEBUG & RADEON_TEXTURE)
fprintf(stderr,
"level %d, face %d: rs:%d %dx%d at %d\n",
level, face, lvl->rowstride, lvl->width, lvl->height, lvl->faces[face].offset);
#define MORE_VERBOSE 1
#if MORE_VERBOSE
-#define VERBOSE (RADEON_DEBUG & DEBUG_VERBOSE)
+#define VERBOSE (radeon_is_debug_enabled(RADEON_SANITY, RADEON_VERBOSE))
#define NORMAL (1)
#else
#define VERBOSE 0
-#define NORMAL (RADEON_DEBUG & DEBUG_VERBOSE)
+#define NORMAL (radeon_is_debug_enabled(RADEON_SANITY, RADEON_VERBOSE))
#endif
extern const struct dri_extension gl_20_extension[];
-#ifndef RADEON_DEBUG
-
-static const struct dri_debug_control debug_control[] = {
- {"fall", DEBUG_FALLBACKS},
- {"tex", DEBUG_TEXTURE},
- {"ioctl", DEBUG_IOCTL},
- {"prim", DEBUG_PRIMS},
- {"vert", DEBUG_VERTS},
- {"state", DEBUG_STATE},
- {"code", DEBUG_CODEGEN},
- {"vfmt", DEBUG_VFMT},
- {"vtxf", DEBUG_VFMT},
- {"verb", DEBUG_VERBOSE},
- {"dri", DEBUG_DRI},
- {"dma", DEBUG_DMA},
- {"san", DEBUG_SANITY},
- {"sync", DEBUG_SYNC},
- {"pix", DEBUG_PIXEL},
- {"mem", DEBUG_MEMORY},
- {"allmsg", ~DEBUG_SYNC}, /* avoid the term "sync" because the parser uses strstr */
- {NULL, 0}
-};
-#endif /* RADEON_DEBUG */
-
#endif /* RADEON_COMMON && defined(RADEON_COMMON_FOR_R300) */
extern const struct dri_extension card_extensions[];
return NULL;
}
-#if DO_DEBUG && RADEON_COMMON && defined(RADEON_COMMON_FOR_R300)
- RADEON_DEBUG = driParseDebugString(getenv("RADEON_DEBUG"), debug_control);
-#endif
+ radeon_init_debug();
+
/* parse information in __driConfigOptions */
driParseOptionInfo (&screen->optionCache,
__driConfigOptions, __driNConfigOptions);
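The radeon_init_debug() call above replaces the removed per-driver driParseDebugString() blocks. A plausible sketch of what it centralizes, reusing the debug_control[] name-to-flag table shown earlier in this patch (keeping RADEON_GENERAL always on is an assumption):

#include <stdlib.h>

radeon_debug_type_t radeon_enabled_debug_types;

void radeon_init_debug(void)
{
	/* Parse RADEON_DEBUG=tex,state,ioctl,... into a bitmask using the
	 * shared debug_control[] table instead of per-driver copies. */
	radeon_enabled_debug_types =
		driParseDebugString(getenv("RADEON_DEBUG"), debug_control);

	/* Assumption: keep errors and warnings visible even when no
	 * debug options are requested. */
	radeon_enabled_debug_types |= RADEON_GENERAL;
}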
return NULL;
}
-#if DO_DEBUG && RADEON_COMMON && defined(RADEON_COMMON_FOR_R300)
- RADEON_DEBUG = driParseDebugString(getenv("RADEON_DEBUG"), debug_control);
-#endif
+ radeon_init_debug();
/* parse information in __driConfigOptions */
driParseOptionInfo (&screen->optionCache,
if (ctx->Light.ColorMaterialEnabled)
mask &= ~ctx->Light.ColorMaterialBitmask;
- if (RADEON_DEBUG & DEBUG_STATE)
+ if (RADEON_DEBUG & RADEON_STATE)
fprintf(stderr, "%s\n", __FUNCTION__);
r100ContextPtr rmesa = R100_CONTEXT(ctx);
GLuint p, flag;
- if ( RADEON_DEBUG & DEBUG_STATE )
+ if ( RADEON_DEBUG & RADEON_STATE )
fprintf( stderr, "%s( %s = %s )\n", __FUNCTION__,
_mesa_lookup_enum_by_nr( cap ),
state ? "GL_TRUE" : "GL_FALSE" );
GLboolean tmp;
RADEON_STATECHANGE( rmesa, tcl );
- if (RADEON_DEBUG & DEBUG_STATE)
+ if (RADEON_DEBUG & RADEON_STATE)
fprintf(stderr, "%s %d BEFORE %x\n", __FUNCTION__, ctx->_NeedEyeCoords,
rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL]);
rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL] &= ~RADEON_RESCALE_NORMALS;
}
- if (RADEON_DEBUG & DEBUG_STATE)
+ if (RADEON_DEBUG & RADEON_STATE)
fprintf(stderr, "%s %d AFTER %x\n", __FUNCTION__, ctx->_NeedEyeCoords,
rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL]);
}
#include "radeon_state.h"
#include "radeon_swtcl.h"
#include "radeon_tcl.h"
+#include "radeon_debug.h"
/* R100: xyzw, c0, c1/fog, stq[0..2] = 4+1+1+3*3 = 15 right? */
NULL, 0 );
rmesa->radeon.swtcl.vertex_size /= 4;
RENDERINPUTS_COPY( rmesa->radeon.tnl_index_bitset, index_bitset );
- if (RADEON_DEBUG & DEBUG_VERTS)
- fprintf( stderr, "%s: vertex_size= %d floats\n",
- __FUNCTION__, rmesa->radeon.swtcl.vertex_size);
+ radeon_print(RADEON_SWRENDER, RADEON_VERBOSE,
+ "%s: vertex_size= %d floats\n", __FUNCTION__, rmesa->radeon.swtcl.vertex_size);
}
}
if (!length)
continue;
- if (RADEON_DEBUG & DEBUG_PRIMS)
- fprintf(stderr, "radeon_render.c: prim %s %d..%d\n",
+ radeon_print(RADEON_SWRENDER, RADEON_NORMAL,
+ "radeon_render.c: prim %s %d..%d\n",
_mesa_lookup_enum_by_nr(prim & PRIM_MODE_MASK),
start, start+length);
TCL_FALLBACK( ctx, RADEON_TCL_FALLBACK_RASTER, GL_TRUE );
_swsetup_Wakeup( ctx );
rmesa->radeon.swtcl.RenderIndex = ~0;
- if (RADEON_DEBUG & DEBUG_FALLBACKS) {
+ if (RADEON_DEBUG & RADEON_FALLBACKS) {
fprintf(stderr, "Radeon begin rasterization fallback: 0x%x %s\n",
bit, getFallbackString(bit));
}
radeonChooseVertexState( ctx );
radeonChooseRenderState( ctx );
}
- if (RADEON_DEBUG & DEBUG_FALLBACKS) {
+ if (RADEON_DEBUG & RADEON_FALLBACKS) {
fprintf(stderr, "Radeon end rasterization fallback: 0x%x %s\n",
bit, getFallbackString(bit));
}
// radeonReleaseDmaRegion( rmesa, &rmesa->swtcl.indexed_verts,
// __FUNCTION__ );
- if (RADEON_DEBUG & DEBUG_FALLBACKS)
+ if (RADEON_DEBUG & RADEON_FALLBACKS)
fprintf(stderr, "Radeon end tcl fallback\n");
}
if (mode) {
rmesa->radeon.TclFallback |= bit;
if (oldfallback == 0) {
- if (RADEON_DEBUG & DEBUG_FALLBACKS)
+ if (RADEON_DEBUG & RADEON_FALLBACKS)
fprintf(stderr, "Radeon begin tcl fallback %s\n",
getFallbackString( bit ));
transition_to_swtnl( ctx );
else {
rmesa->radeon.TclFallback &= ~bit;
if (oldfallback == bit) {
- if (RADEON_DEBUG & DEBUG_FALLBACKS)
+ if (RADEON_DEBUG & RADEON_FALLBACKS)
fprintf(stderr, "Radeon end tcl fallback %s\n",
getFallbackString( bit ));
transition_to_hwtnl( ctx );
GLuint unit = ctx->Texture.CurrentUnit;
struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
- if ( RADEON_DEBUG & DEBUG_STATE ) {
+ if ( RADEON_DEBUG & RADEON_STATE ) {
fprintf( stderr, "%s( %s )\n",
__FUNCTION__, _mesa_lookup_enum_by_nr( pname ) );
}
{
radeonTexObj* t = radeon_tex_obj(texObj);
- if ( RADEON_DEBUG & (DEBUG_STATE|DEBUG_TEXTURE) ) {
- fprintf( stderr, "%s( %s )\n", __FUNCTION__,
+ radeon_print(RADEON_TEXTURE, RADEON_VERBOSE, "%s( %s )\n", __FUNCTION__,
_mesa_lookup_enum_by_nr( pname ) );
- }
switch ( pname ) {
case GL_TEXTURE_MIN_FILTER:
radeonTexObj* t = radeon_tex_obj(texObj);
int i;
- if ( RADEON_DEBUG & (DEBUG_STATE|DEBUG_TEXTURE) ) {
- fprintf( stderr, "%s( %p (target = %s) )\n", __FUNCTION__, (void *)texObj,
+ radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
+ "%s( %p (target = %s) )\n", __FUNCTION__, (void *)texObj,
_mesa_lookup_enum_by_nr( texObj->Target ) );
- }
if ( rmesa ) {
radeon_firevertices(&rmesa->radeon);
assert( (texUnit->_ReallyEnabled == 0)
|| (texUnit->_Current != NULL) );
- if ( RADEON_DEBUG & DEBUG_TEXTURE ) {
+ if ( RADEON_DEBUG & RADEON_TEXTURE ) {
fprintf( stderr, "%s( %p, %d )\n", __FUNCTION__, (void *)ctx, unit );
}
(texUnit->GenS.Mode != texUnit->GenQ.Mode)) ) {
/* Mixed modes, fallback:
*/
- if (RADEON_DEBUG & DEBUG_FALLBACKS)
+ if (RADEON_DEBUG & RADEON_FALLBACKS)
fprintf(stderr, "fallback mixed texgen\n");
return GL_FALSE;
}
}
else {
/* some texgen mode not including both S and T bits */
- if (RADEON_DEBUG & DEBUG_FALLBACKS)
+ if (RADEON_DEBUG & RADEON_FALLBACKS)
fprintf(stderr, "fallback mixed texgen/nontexgen\n");
return GL_FALSE;
}
default:
/* Unsupported mode, fallback:
*/
- if (RADEON_DEBUG & DEBUG_FALLBACKS)
+ if (RADEON_DEBUG & RADEON_FALLBACKS)
fprintf(stderr, "fallback GL_SPHERE_MAP\n");
return GL_FALSE;
}
if (t->validated || t->image_override)
return GL_TRUE;
- if (RADEON_DEBUG & DEBUG_TEXTURE)
+ if (RADEON_DEBUG & RADEON_TEXTURE)
fprintf(stderr, "%s: Validating texture %p now\n", __FUNCTION__, texObj);
if (baseimage->base.Border > 0)
}
if (!t->mt) {
- if (RADEON_DEBUG & DEBUG_TEXTURE)
+ if (RADEON_DEBUG & RADEON_TEXTURE)
fprintf(stderr, " Allocate new miptree\n");
radeon_try_alloc_miptree(rmesa, t, &baseimage->base, 0, texObj->BaseLevel);
if (!t->mt) {
for(face = 0; face < t->mt->faces; ++face) {
for(level = t->mt->firstLevel; level <= t->mt->lastLevel; ++level) {
radeon_texture_image *image = get_radeon_texture_image(texObj->Image[face][level]);
- if (RADEON_DEBUG & DEBUG_TEXTURE)
+ if (RADEON_DEBUG & RADEON_TEXTURE)
fprintf(stderr, " face %i, level %i... %p vs %p ", face, level, t->mt, image->mt);
if (t->mt == image->mt) {
- if (RADEON_DEBUG & DEBUG_TEXTURE)
+ if (RADEON_DEBUG & RADEON_TEXTURE)
fprintf(stderr, "OK\n");
continue;
}
- if (RADEON_DEBUG & DEBUG_TEXTURE)
+ if (RADEON_DEBUG & RADEON_TEXTURE)
fprintf(stderr, "migrating\n");
migrate_image_to_miptree(t->mt, image, face, level);
}