}
src_ptr = ADD_POINTERS(mesa_ind_buf->obj->Pointer, mesa_ind_buf->ptr);
+ radeon_print(RADEON_FALLBACKS, RADEON_IMPORTANT,
+ "%s: Fixing index buffer format. type %d\n",
+ __func__, mesa_ind_buf->type);
+
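+ /* GL_UNSIGNED_BYTE indices are widened to 16-bit GLushort indices below,
+ * with the element count padded up to an even number */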
if (mesa_ind_buf->type == GL_UNSIGNED_BYTE) {
GLuint size = sizeof(GLushort) * ((mesa_ind_buf->count + 1) & ~1);
GLubyte *in = (GLubyte *)src_ptr;
r300->ind_buf.bo = NULL;
return;
}
+ radeon_print(RADEON_RENDER, RADEON_TRACE, "%s\n", __func__);
#if MESA_BIG_ENDIAN
if (mesa_ind_buf->type == GL_UNSIGNED_INT) {
radeonAllocDmaRegion(&r300->radeon, &attr->bo, &attr->bo_offset, sizeof(GLfloat) * input->Size * count, 32);
dst_ptr = (GLfloat *)ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
- if (RADEON_DEBUG & DEBUG_FALLBACKS) {
- fprintf(stderr, "%s: Converting vertex attributes, attribute data format %x,", __FUNCTION__, input->Type);
- fprintf(stderr, "stride %d, components %d\n", stride, input->Size);
- }
+ radeon_print(RADEON_FALLBACKS, RADEON_IMPORTANT,
+ "%s: Converting vertex attributes, attribute data format %x, "
+ "stride %d, components %d\n",
+ __FUNCTION__, input->Type, stride, input->Size);
assert(src_ptr != NULL);
mapped_named_bo = GL_TRUE;
}
+ radeon_print(RADEON_FALLBACKS, RADEON_IMPORTANT, "%s. Vertex alignment doesn't match hw requirements.\n", __func__);
+
{
GLvoid *src_ptr = ADD_POINTERS(input->BufferObj->Pointer, input->Ptr);
GLvoid *dst_ptr = ADD_POINTERS(attr->bo->ptr, attr->bo_offset);
GLenum type;
GLuint stride;
+ radeon_print(RADEON_RENDER, RADEON_TRACE, "%s\n", __func__);
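+ /* a stride of 0 means the attribute data is tightly packed, so fall back
+ * to the element size computed from its type and component count */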
stride = (input->StrideB == 0) ? getTypeSize(input->Type) * input->Size : input->StrideB;
if (input->Type == GL_DOUBLE || input->Type == GL_UNSIGNED_INT || input->Type == GL_INT ||
{
r300ContextPtr r300 = R300_CONTEXT(ctx);
struct r300_vertex_buffer *vbuf = &r300->vbuf;
+ radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s\n", __func__);
{
int i, tmp;
GLuint stride;
int ret;
int i, index;
+ radeon_print(RADEON_RENDER, RADEON_VERBOSE,
+ "%s: count %d num_attribs %d\n",
+ __func__, count, vbuf->num_attribs);
for (index = 0; index < vbuf->num_attribs; index++) {
struct radeon_aos *aos = &r300->radeon.tcl.aos[index];
* to prevent double unref in radeonReleaseArrays
* called during context destroy
*/
+ radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s\n", __func__);
r300ContextPtr r300 = R300_CONTEXT(ctx);
{
int i;
dwords = 2*CACHE_FLUSH_BUFSZ;
dwords += PRE_EMIT_STATE_BUFSZ;
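+ /* per primitive, reserve space for the AOS setup, two scissor emits and
+ * the fire-AOS (draw) packet */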
dwords += (AOS_BUFSZ(vbuf->num_attribs)
- + SCISSORS_BUFSZ
+ + SCISSORS_BUFSZ*2
+ FIREAOS_BUFSZ )*nr_prims;
state_size = radeonCountStateEmitSize(&r300->radeon);
else
dwords += state_size;
- if (RADEON_DEBUG & DEBUG_PRIMS)
- fprintf(stderr, "%s: total prediction size is %d.\n", __FUNCTION__, dwords);
+ radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s: total prediction size is %d.\n", __FUNCTION__, dwords);
return dwords;
}
struct r300_context *r300 = R300_CONTEXT(ctx);
GLuint i;
- if (RADEON_DEBUG & DEBUG_PRIMS)
- fprintf(stderr, "%s: %u (%d-%d) cs begin at %d\n",
+ radeon_print(RADEON_RENDER, RADEON_NORMAL, "%s: %u (%d-%d) cs begin at %d\n",
__FUNCTION__, nr_prims, min_index, max_index, r300->radeon.cmdbuf.cs->cdw );
if (ctx->NewState)
r300FreeData(ctx);
- if (RADEON_DEBUG & DEBUG_PRIMS)
- fprintf(stderr, "%s: %u (%d-%d) cs ending at %d\n",
+ radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s: %u (%d-%d) cs ending at %d\n",
__FUNCTION__, nr_prims, min_index, max_index, r300->radeon.cmdbuf.cs->cdw );
if (emit_end < r300->radeon.cmdbuf.cs->cdw)
}
if (min_index) {
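+ /* indices don't start at 0; let vbo_rebase_prims() rebase the primitives
+ * and call back into r300DrawPrims with the adjusted range */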
+ radeon_print(RADEON_FALLBACKS, RADEON_IMPORTANT,
+ "%s: Rebasing primitives. %p nr_prims %d min_index %u max_index %u\n",
+ __func__, prim, nr_prims, min_index, max_index);
vbo_rebase_prims( ctx, arrays, prim, nr_prims, ib, min_index, max_index, r300DrawPrims );
return;
}