../common/dri_util.c
RADEON_COMMON_SOURCES = \
- radeon_texture.c \
+ radeon_bo_legacy.c \
+ radeon_buffer_objects.c \
radeon_common_context.c \
radeon_common.c \
+ radeon_cs_legacy.c \
radeon_dma.c \
+ radeon_debug.c \
+ radeon_fbo.c \
radeon_lock.c \
- radeon_bo_legacy.c \
- radeon_cs_legacy.c \
radeon_mipmap_tree.c \
radeon_span.c \
- radeon_fbo.c \
- radeon_buffer_objects.c \
- radeon_queryobj.c
+ radeon_queryobj.c \
+ radeon_texture.c
DRIVER_SOURCES = \
radeon_screen.c \
return;
}
- if (RADEON_DEBUG & DEBUG_STATE)
+ if (RADEON_DEBUG & RADEON_STATE)
fprintf(stderr,"rrb is %p %d %dx%d\n", rrb, offset, rrb->base.Width, rrb->base.Height);
cbpitch = (rrb->pitch / rrb->cpp);
if (rrb->cpp == 4)
r300->radeon.hw.max_state_size = 2 + 2; /* reserve extra space for WAIT_IDLE and tex cache flush */
mtu = r300->radeon.glCtx->Const.MaxTextureUnits;
- if (RADEON_DEBUG & DEBUG_TEXTURE) {
+ if (RADEON_DEBUG & RADEON_TEXTURE) {
fprintf(stderr, "Using %d maximum texture units..\n", mtu);
}
}
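The hunks in this patch replace the old DEBUG_* bitmask names and raw fprintf calls with the radeon_debug interface provided by the newly added radeon_debug.c. As a rough sketch of the interface the converted call sites assume (only names that actually appear in this patch are listed; the authoritative declarations and values live in radeon_debug.h):

/* Editor's sketch, not part of the patch; numeric values are illustrative.
 * RADEON_DEBUG remains a mask of the type bits, which is why the plain
 * `RADEON_DEBUG & RADEON_STATE` style checks below still work. */
typedef enum radeon_debug_type {
	RADEON_TEXTURE   = 0x0001,
	RADEON_STATE     = 0x0002,
	RADEON_IOCTL     = 0x0004,
	RADEON_RENDER    = 0x0008,
	RADEON_SWRENDER  = 0x0010,
	RADEON_FALLBACKS = 0x0020,
	RADEON_PIXEL     = 0x0040,
	RADEON_VERTS     = 0x0080
} radeon_debug_type_t;

typedef enum radeon_debug_level {
	RADEON_IMPORTANT = 1,	/* always worth reporting */
	RADEON_NORMAL    = 2,
	RADEON_VERBOSE   = 3,
	RADEON_TRACE     = 4	/* per-call function tracing */
} radeon_debug_level_t;

/* Emits the printf-style message only when `type` is enabled at `level` or above. */
void radeon_print(radeon_debug_type_t type, radeon_debug_level_t level,
                  const char *fmt, ...);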
src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);
+ radeon_print(RADEON_FALLBACKS, RADEON_IMPORTANT,
+ "%s: Fixing index buffer format. type %d\n",
+ __func__, mesa_ind_buf->type);
+
if (mesa_ind_buf->type == GL_UNSIGNED_BYTE) {
GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);
GLubyte *in = (GLubyte *)src_ptr;
r300->ind_buf.bo = NULL;
return;
}
+ radeon_print(RADEON_RENDER, RADEON_TRACE, "%s\n", __func__);
#if MESA_BIG_ENDIAN
if (mesa_ind_buf->type == GL_UNSIGNED_INT) {
radeonAllocDmaRegion(&r300->radeon, &attr->bo, &attr->bo_offset, sizeof(GLfloat) * input->Size * count, 32);
dst_ptr = (GLfloat *)ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
- if (RADEON_DEBUG & DEBUG_FALLBACKS) {
- fprintf(stderr, "%s: Converting vertex attributes, attribute data format %x,", __FUNCTION__, input->Type);
- fprintf(stderr, "stride %d, components %d\n", stride, input->Size);
- }
+ radeon_print(RADEON_FALLBACKS, RADEON_IMPORTANT,
+ "%s: Converting vertex attributes, attribute data format %x, "
+ "stride %d, components %d\n",
+ __FUNCTION__, input->Type, stride, input->Size);
assert(src_ptr != NULL);
mapped_named_bo = GL_TRUE;
}
+ radeon_print(RADEON_FALLBACKS, RADEON_IMPORTANT, "%s. Vertex alignment doesn't match hw requirements.\n", __func__);
+
{
GLvoid *src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
GLvoid *dst_ptr = ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
GLenum type;
GLuint stride;
+ radeon_print(RADEON_RENDER, RADEON_TRACE, "%s\n", __func__);
stride = (input->StrideB == 0) ? getTypeSize(input->Type) * input->Size : input->StrideB;
if (input->Type == GL_DOUBLE || input->Type == GL_UNSIGNED_INT || input->Type == GL_INT ||
{
r300ContextPtr r300 = R300_CONTEXT(ctx);
struct r300_vertex_buffer *vbuf = &r300->vbuf;
+ radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s\n", __func__);
{
int i, tmp;
GLuint stride;
int ret;
int i, index;
+ radeon_print(RADEON_RENDER, RADEON_VERBOSE,
+ "%s: count %d num_attribs %d\n",
+ __func__, count, vbuf->num_attribs);
for (index = 0; index < vbuf->num_attribs; index++) {
struct radeon_aos *aos = &r300->radeon.tcl.aos[index];
* to prevent double unref in radeonReleaseArrays
* called during context destroy
*/
+ radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s\n", __func__);
r300ContextPtr r300 = R300_CONTEXT(ctx);
{
int i;
else
dwords += state_size;
- if (RADEON_DEBUG & DEBUG_PRIMS)
- fprintf(stderr, "%s: total prediction size is %d.\n", __FUNCTION__, dwords);
+ radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s: total prediction size is %d.\n", __FUNCTION__, dwords);
return dwords;
}
struct r300_context *r300 = R300_CONTEXT(ctx);
GLuint i;
- if (RADEON_DEBUG & DEBUG_PRIMS)
- fprintf(stderr, "%s: %u (%d-%d) cs begin at %d\n",
+ radeon_print(RADEON_RENDER, RADEON_NORMAL, "%s: %u (%d-%d) cs begin at %d\n",
__FUNCTION__, nr_prims, min_index, max_index, r300->radeon.cmdbuf.cs->cdw );
if (ctx->NewState)
r300FreeData(ctx);
- if (RADEON_DEBUG & DEBUG_PRIMS)
- fprintf(stderr, "%s: %u (%d-%d) cs ending at %d\n",
+ radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s: %u (%d-%d) cs ending at %d\n",
__FUNCTION__, nr_prims, min_index, max_index, r300->radeon.cmdbuf.cs->cdw );
if (emit_end < r300->radeon.cmdbuf.cs->cdw)
}
if (min_index) {
+ radeon_print(RADEON_FALLBACKS, RADEON_IMPORTANT,
+ "%s: Rebasing primitives. %p nr_prims %d min_index %u max_index %u\n",
+ __func__, prim, nr_prims, min_index, max_index);
vbo_rebase_prims( ctx, arrays, prim, nr_prims, ib, min_index, max_index, r300DrawPrims );
return;
}
struct r300_fragment_program_compiler compiler;
rc_init(&compiler.Base);
- compiler.Base.Debug = (RADEON_DEBUG & DEBUG_PIXEL) ? GL_TRUE : GL_FALSE;
+ compiler.Base.Debug = (RADEON_DEBUG & RADEON_PIXEL) ? GL_TRUE : GL_FALSE;
compiler.code = &fp->code;
compiler.state = fp->state;
GLuint cbpitch = 0;
r300ContextPtr rmesa = r300;
- if (RADEON_DEBUG & DEBUG_IOCTL)
+ if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "%s: buffer %p (%i,%i %ix%i)\n",
__FUNCTION__, rrb, dPriv->x, dPriv->y,
dPriv->w, dPriv->h);
int i, ret;
struct gl_framebuffer *fb = ctx->DrawBuffer;
- if (RADEON_DEBUG & DEBUG_IOCTL)
+ if (RADEON_DEBUG & RADEON_IOCTL)
fprintf(stderr, "r300Clear\n");
if (!r300->radeon.radeonScreen->driScreen->dri2.enabled) {
}
if (swrast_mask) {
- if (RADEON_DEBUG & DEBUG_FALLBACKS)
+ if (RADEON_DEBUG & RADEON_FALLBACKS)
fprintf(stderr, "%s: swrast clear, mask: %x\n",
__FUNCTION__, swrast_mask);
_swrast_Clear(ctx, swrast_mask);
int sz = 1 + (nr >> 1) * 3 + (nr & 1) * 2;
int i;
- if (RADEON_DEBUG & DEBUG_VERTS)
+ if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s: nr=%d, ofs=0x%08x\n", __FUNCTION__, nr,
offset);
if (mode) {
if ((fallback_warn & bit) == 0) {
- if (RADEON_DEBUG & DEBUG_FALLBACKS)
+ if (RADEON_DEBUG & RADEON_FALLBACKS)
_mesa_fprintf(stderr, "WARNING! Falling back to software for %s\n", getFallbackString(bit));
fallback_warn |= bit;
}
r300->hw.txe.cmd[R300_TXE_ENABLE] = 0x0;
mtu = r300->radeon.glCtx->Const.MaxTextureUnits;
- if (RADEON_DEBUG & DEBUG_STATE)
+ if (RADEON_DEBUG & RADEON_STATE)
fprintf(stderr, "mtu=%d\n", mtu);
if (mtu > R300_MAX_TEXTURE_UNITS) {
t->pp_txformat & 0xff);
}
- if (RADEON_DEBUG & DEBUG_STATE)
+ if (RADEON_DEBUG & RADEON_STATE)
fprintf(stderr,
"Activating texture unit %d\n", i);
r300->vtbl.SetupFragmentShaderTextures(ctx, tmu_mappings);
- if (RADEON_DEBUG & DEBUG_STATE)
+ if (RADEON_DEBUG & RADEON_STATE)
fprintf(stderr, "TX_ENABLE: %08x last_hw_tmu=%d\n",
r300->hw.txe.cmd[R300_TXE_ENABLE], last_hw_tmu);
}
static void r300Enable(GLcontext * ctx, GLenum cap, GLboolean state)
{
r300ContextPtr rmesa = R300_CONTEXT(ctx);
- if (RADEON_DEBUG & DEBUG_STATE)
+ if (RADEON_DEBUG & RADEON_STATE)
fprintf(stderr, "%s( %s = %s )\n", __FUNCTION__,
_mesa_lookup_enum_by_nr(cap),
state ? "GL_TRUE" : "GL_FALSE");
has_tcl = r300->options.hw_tcl_enabled;
- if (RADEON_DEBUG & DEBUG_STATE)
+ if (RADEON_DEBUG & RADEON_STATE)
fprintf(stderr, "%s\n", __FUNCTION__);
radeon_firevertices(&r300->radeon);
GLuint fp_reads = rmesa->selected_fp->InputsRead;
struct vertex_attribute *attrs = rmesa->vbuf.attribs;
+ radeon_print(RADEON_SWRENDER, RADEON_VERBOSE, "%s\n", __func__);
rmesa->swtcl.coloroffset = rmesa->swtcl.specoffset = 0;
rmesa->radeon.swtcl.vertex_attr_count = 0;
- if (RADEON_DEBUG & DEBUG_VERTS)
+ if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s\n", __func__);
/* We always want non Ndc coords format */
{
r300ContextPtr rmesa = R300_CONTEXT(ctx);
GLuint InputsRead, OutputsWritten;
+ radeon_print(RADEON_SWRENDER, RADEON_TRACE, "%s\n", __func__);
r300ChooseSwtclVertexFormat(ctx, &InputsRead, &OutputsWritten);
r300SetupVAP(ctx, InputsRead, OutputsWritten);
rmesa->radeon.swtcl.emit_prediction += rmesa->radeon.cmdbuf.cs->cdw
+ vertex_size + scissor_size + prim_size + cache_flush_size * 2 + pre_emit_state;
+ radeon_print(RADEON_SWRENDER, RADEON_VERBOSE,
+ "%s, size %d\n",
+ __func__, rmesa->radeon.cmdbuf.cs->cdw
+ + vertex_size + scissor_size + prim_size + cache_flush_size * 2 + pre_emit_state);
}
}
r300ContextPtr rmesa = R300_CONTEXT(ctx);
GLuint index = 0;
GLuint flags = ctx->_TriangleCaps;
- if (RADEON_DEBUG & DEBUG_VERTS)
- fprintf(stderr, "%s\n", __func__);
+ radeon_print(RADEON_SWRENDER, RADEON_VERBOSE, "%s\n", __func__);
if (flags & DD_TRI_UNFILLED) index |= R300_UNFILLED_BIT;
void r300RenderStart(GLcontext *ctx)
{
- if (RADEON_DEBUG & DEBUG_VERTS)
- fprintf(stderr, "%s\n", __func__);
+ radeon_print(RADEON_SWRENDER, RADEON_VERBOSE, "%s\n", __func__);
r300ContextPtr rmesa = R300_CONTEXT( ctx );
r300ChooseRenderState(ctx);
static void r300RasterPrimitive( GLcontext *ctx, GLuint hwprim )
{
r300ContextPtr rmesa = R300_CONTEXT(ctx);
- if (RADEON_DEBUG & DEBUG_VERTS)
- fprintf(stderr, "%s\n", __func__);
+ radeon_print(RADEON_SWRENDER, RADEON_TRACE, "%s\n", __func__);
if (rmesa->radeon.swtcl.hw_primitive != hwprim) {
R300_NEWPRIM( rmesa );
r300ContextPtr rmesa = R300_CONTEXT(ctx);
rmesa->radeon.swtcl.render_primitive = prim;
- if (RADEON_DEBUG & DEBUG_VERTS)
- fprintf(stderr, "%s\n", __func__);
+ radeon_print(RADEON_SWRENDER, RADEON_TRACE, "%s\n", __func__);
if ((prim == GL_TRIANGLES) && (ctx->_TriangleCaps & DD_TRI_UNFILLED))
return;
void r300ResetLineStipple(GLcontext *ctx)
{
- if (RADEON_DEBUG & DEBUG_VERTS)
+ if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s\n", __func__);
}
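For comparison, the same trace expressed through the new helper; this is an editor's illustration of the pattern used in the converted hunks, not part of the patch:

	/* Sketch only: equivalent to the RADEON_VERTS-gated fprintf kept above. */
	radeon_print(RADEON_VERTS, RADEON_TRACE, "%s\n", __func__);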
TNLcontext *tnl = TNL_CONTEXT(ctx);
r300ContextPtr rmesa = R300_CONTEXT(ctx);
static int firsttime = 1;
- if (RADEON_DEBUG & DEBUG_VERTS)
- fprintf(stderr, "%s\n", __func__);
+ radeon_print(RADEON_SWRENDER, RADEON_NORMAL, "%s\n", __func__);
if (firsttime) {
init_rast_tab();
{
BATCH_LOCALS(&rmesa->radeon);
- if (RADEON_DEBUG & DEBUG_VERTS)
- fprintf(stderr, "%s: vertex_size %d, offset 0x%x \n",
+ radeon_print(RADEON_SWRENDER, RADEON_TRACE,
+ "%s: vertex_size %d, offset 0x%x \n",
__FUNCTION__, vertex_size, offset);
BEGIN_BATCH(7);
{
BATCH_LOCALS(&rmesa->radeon);
int type, num_verts;
- if (RADEON_DEBUG & DEBUG_VERTS)
+ if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s\n", __func__);
type = r300PrimitiveType(rmesa, primitive);
void r300_swtcl_flush(GLcontext *ctx, uint32_t current_offset)
{
- if (RADEON_DEBUG & DEBUG_VERTS)
- fprintf(stderr, "%s\n", __func__);
+ radeon_print(RADEON_SWRENDER, RADEON_TRACE, "%s\n", __func__);
r300ContextPtr rmesa = R300_CONTEXT(ctx);
r300EmitCacheFlush(rmesa);
| R300_TX_MIN_FILTER_ANISO
| R300_TX_MIN_FILTER_MIP_LINEAR
| aniso_filter(anisotropy);
- if (RADEON_DEBUG & DEBUG_TEXTURE)
+ if (RADEON_DEBUG & RADEON_TEXTURE)
fprintf(stderr, "Using maximum anisotropy of %f\n", anisotropy);
return;
}
{
radeonTexObj* t = radeon_tex_obj(texObj);
- if (RADEON_DEBUG & (DEBUG_STATE | DEBUG_TEXTURE)) {
+ if (RADEON_DEBUG & (RADEON_STATE | RADEON_TEXTURE)) {
fprintf(stderr, "%s( %s )\n", __FUNCTION__,
_mesa_lookup_enum_by_nr(pname));
}
r300ContextPtr rmesa = R300_CONTEXT(ctx);
radeonTexObj* t = radeon_tex_obj(texObj);
- if (RADEON_DEBUG & (DEBUG_STATE | DEBUG_TEXTURE)) {
+ if (RADEON_DEBUG & (RADEON_STATE | RADEON_TEXTURE)) {
fprintf(stderr, "%s( %p (target = %s) )\n", __FUNCTION__,
(void *)texObj,
_mesa_lookup_enum_by_nr(texObj->Target));
radeonTexObj* t = CALLOC_STRUCT(radeon_tex_obj);
- if (RADEON_DEBUG & (DEBUG_STATE | DEBUG_TEXTURE)) {
+ if (RADEON_DEBUG & (RADEON_STATE | RADEON_TEXTURE)) {
fprintf(stderr, "%s( %p (target = %s) )\n", __FUNCTION__,
t, _mesa_lookup_enum_by_nr(target));
}
_mesa_memcpy(&vp->key, wanted_key, sizeof(vp->key));
rc_init(&compiler.Base);
- compiler.Base.Debug = (RADEON_DEBUG & DEBUG_VERTS) ? GL_TRUE : GL_FALSE;
+ compiler.Base.Debug = (RADEON_DEBUG & RADEON_VERTS) ? GL_TRUE : GL_FALSE;
compiler.code = &vp->code;
compiler.RequiredOutputs = compute_required_outputs(vp->Base, vp->key.FpReads);