Consistently use C99's __func__ everywhere.
No functional changes.
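
For background (not part of the patch itself): __func__ is a predefined
identifier required by C99 (6.4.2.2); within each function it behaves as
if the declaration `static const char __func__[] = "function-name";`
were present. __FUNCTION__ is an older GCC-specific spelling that modern
GCC keeps only as an alias. A minimal standalone sketch of the
difference:

    #include <stdio.h>

    static void report(void)
    {
        /* C99 guarantees __func__; any conforming compiler accepts this. */
        printf("in %s\n", __func__);

        /* The GCC extension below compiles only with GNU-compatible
         * compilers, which is why the driver moves away from it:
         *
         * printf("in %s\n", __FUNCTION__);
         */
    }

    int main(void)
    {
        report(); /* prints "in report" */
        return 0;
    }

Since the two spellings expand to the same string under GCC, the
substitution is purely mechanical and emits identical debug output.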
Signed-off-by: Marius Predut <marius.predut@intel.com>
Acked-by: Michel Dänzer <michel.daenzer@amd.com>
Reviewed-by: Emil Velikov <emil.l.velikov@gmail.com>
/* Flush is needed to make sure that source buffer has correct data */
radeonFlush(&r200->radeon.glCtx);
- rcommonEnsureCmdBufSpace(&r200->radeon, 102, __FUNCTION__);
+ rcommonEnsureCmdBufSpace(&r200->radeon, 102, __func__);
if (!validate_buffers(r200, src_bo, dst_bo))
return GL_FALSE;
radeonEmitState(&rmesa->radeon);
radeon_print(RADEON_RENDER|RADEON_SWRENDER,RADEON_VERBOSE,
- "%s cmd_used/4: %d prim %x nr %d\n", __FUNCTION__,
+ "%s cmd_used/4: %d prim %x nr %d\n", __func__,
rmesa->store.cmd_used/4, primitive, vertex_nr);
BEGIN_BATCH(3);
r200ContextPtr rmesa = R200_CONTEXT(ctx);
int nr, elt_used = rmesa->tcl.elt_used;
- radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s %x %d\n", __FUNCTION__, rmesa->tcl.hw_primitive, elt_used);
+ radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s %x %d\n", __func__, rmesa->tcl.hw_primitive, elt_used);
assert( rmesa->radeon.dma.flush == r200FlushElts );
rmesa->radeon.dma.flush = NULL;
{
GLushort *retval;
- radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s %d prim %x\n", __FUNCTION__, min_nr, primitive);
+ radeon_print(RADEON_RENDER, RADEON_VERBOSE, "%s %d prim %x\n", __func__, min_nr, primitive);
assert((primitive & R200_VF_PRIM_WALK_IND));
BATCH_LOCALS(&rmesa->radeon);
radeon_print(RADEON_SWRENDER, RADEON_VERBOSE, "%s: vertex_size 0x%x offset 0x%x \n",
- __FUNCTION__, vertex_size, offset);
+ __func__, vertex_size, offset);
BEGIN_BATCH(7);
radeon_print(RADEON_RENDER, RADEON_VERBOSE,
"%s: nr=%d, ofs=0x%08x\n",
- __FUNCTION__, nr, offset);
+ __func__, nr, offset);
BEGIN_BATCH(sz+2+ (nr*2));
OUT_BATCH_PACKET3(R200_CP_CMD_3D_LOAD_VBPNTR, sz - 1);
if ( swmask ) {
if (R200_DEBUG & RADEON_FALLBACKS)
- fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, swmask);
+ fprintf(stderr, "%s: swrast clear, mask: %x\n", __func__, swmask);
_swrast_Clear( ctx, swmask );
}
}
}
- fprintf(stderr, "leaving %s\n\n\n", __FUNCTION__);
+ fprintf(stderr, "leaving %s\n\n\n", __func__);
return 0;
}
default:
fprintf( stderr, "[%s:%u] Invalid RGB blend equation (0x%04x).\n",
- __FUNCTION__, __LINE__, ctx->Color.Blend[0].EquationRGB );
+ __func__, __LINE__, ctx->Color.Blend[0].EquationRGB );
return;
}
default:
fprintf( stderr, "[%s:%u] Invalid A blend equation (0x%04x).\n",
- __FUNCTION__, __LINE__, ctx->Color.Blend[0].EquationA );
+ __func__, __LINE__, ctx->Color.Blend[0].EquationA );
return;
}
/* factor *= 2; */
/* constant *= 2; */
-/* fprintf(stderr, "%s f:%f u:%f\n", __FUNCTION__, factor, constant); */
+/* fprintf(stderr, "%s f:%f u:%f\n", __func__, factor, constant); */
R200_STATECHANGE( rmesa, zbs );
rmesa->hw.zbs.cmd[ZBS_SE_ZBIAS_FACTOR] = factoru.ui32;
{
struct gl_light *l = &ctx->Light.Light[p];
-/* fprintf(stderr, "%s\n", __FUNCTION__); */
+/* fprintf(stderr, "%s\n", __func__); */
if (l->Enabled) {
r200ContextPtr rmesa = R200_CONTEXT(ctx);
mask &= ~ctx->Light._ColorMaterialBitmask;
if (R200_DEBUG & RADEON_STATE)
- fprintf(stderr, "%s\n", __FUNCTION__);
+ fprintf(stderr, "%s\n", __func__);
if (mask & MAT_BIT_FRONT_EMISSION) {
fcmd[MTL_EMMISSIVE_RED] = mat[MAT_ATTRIB_FRONT_EMISSION][0];
GLuint p, flag;
if ( R200_DEBUG & RADEON_STATE )
- fprintf( stderr, "%s( %s = %s )\n", __FUNCTION__,
+ fprintf( stderr, "%s( %s = %s )\n", __func__,
_mesa_lookup_enum_by_nr( cap ),
state ? "GL_TRUE" : "GL_FALSE" );
GLboolean tmp;
if (R200_DEBUG & RADEON_STATE)
- fprintf(stderr, "%s %d BEFORE %x\n", __FUNCTION__, ctx->_NeedEyeCoords,
+ fprintf(stderr, "%s %d BEFORE %x\n", __func__, ctx->_NeedEyeCoords,
rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL_0]);
if (ctx->_NeedEyeCoords)
}
if (R200_DEBUG & RADEON_STATE)
- fprintf(stderr, "%s %d AFTER %x\n", __FUNCTION__, ctx->_NeedEyeCoords,
+ fprintf(stderr, "%s %d AFTER %x\n", __func__, ctx->_NeedEyeCoords,
rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL_0]);
}
int unit;
if (R200_DEBUG & RADEON_STATE)
- fprintf(stderr, "%s before COMPSEL: %x\n", __FUNCTION__,
+ fprintf(stderr, "%s before COMPSEL: %x\n", __func__,
rmesa->hw.vtx.cmd[VTX_TCL_OUTPUT_COMPSEL]);
rmesa->TexMatEnabled = 0;
int i, ret;
if (RADEON_DEBUG & RADEON_IOCTL)
- fprintf(stderr, "%s\n", __FUNCTION__);
+ fprintf(stderr, "%s\n", __func__);
radeon_cs_space_reset_bos(rmesa->radeon.cmdbuf.cs);
rrb = radeon_get_colorbuffer(&rmesa->radeon);
GLboolean has_material;
if (0)
- fprintf(stderr, "%s, newstate: %x\n", __FUNCTION__, rmesa->radeon.NewGLState);
+ fprintf(stderr, "%s, newstate: %x\n", __func__, rmesa->radeon.NewGLState);
/* Validate state:
*/
extern void r200Fallback( struct gl_context *ctx, GLuint bit, GLboolean mode );
#define FALLBACK( rmesa, bit, mode ) do { \
if ( 0 ) fprintf( stderr, "FALLBACK in %s: #%d=%d\n", \
- __FUNCTION__, bit, mode ); \
+ __func__, bit, mode ); \
r200Fallback( &rmesa->radeon.glCtx, bit, mode ); \
} while (0)
if (rcommonEnsureCmdBufSpace(&rmesa->radeon,
state_size +
vertex_array_size + prim_size,
- __FUNCTION__))
+ __func__))
rmesa->radeon.swtcl.emit_prediction = radeonCountStateEmitSize(&rmesa->radeon);
else
rmesa->radeon.swtcl.emit_prediction = state_size;
"%s space %u, aos %d\n",
__func__, space_required, AOS_BUFSZ(nr_aos) );
/* flush the buffer in case we need more than is left. */
- if (rcommonEnsureCmdBufSpace(&rmesa->radeon, space_required + state_size, __FUNCTION__))
+ if (rcommonEnsureCmdBufSpace(&rmesa->radeon, space_required + state_size, __func__))
return space_required + radeonCountStateEmitSize( &rmesa->radeon );
else
return space_required + state_size;
if (rmesa->radeon.TclFallback)
return GL_TRUE; /* fallback to software t&l */
- radeon_print(RADEON_RENDER, RADEON_NORMAL, "%s\n", __FUNCTION__);
+ radeon_print(RADEON_RENDER, RADEON_NORMAL, "%s\n", __func__);
if (VB->Count == 0)
return GL_FALSE;
is_clamp_to_border = GL_TRUE;
break;
default:
- _mesa_problem(NULL, "bad S wrap mode in %s", __FUNCTION__);
+ _mesa_problem(NULL, "bad S wrap mode in %s", __func__);
}
if (tObj->Target != GL_TEXTURE_1D) {
is_clamp_to_border = GL_TRUE;
break;
default:
- _mesa_problem(NULL, "bad T wrap mode in %s", __FUNCTION__);
+ _mesa_problem(NULL, "bad T wrap mode in %s", __func__);
}
}
is_clamp_to_border = GL_TRUE;
break;
default:
- _mesa_problem(NULL, "bad R wrap mode in %s", __FUNCTION__);
+ _mesa_problem(NULL, "bad R wrap mode in %s", __func__);
}
if ( is_clamp_to_border ) {
struct gl_texture_unit *texUnit = &ctx->Texture.Unit[unit];
radeon_print(RADEON_TEXTURE | RADEON_STATE, RADEON_VERBOSE, "%s( %s )\n",
- __FUNCTION__, _mesa_lookup_enum_by_nr( pname ) );
+ __func__, _mesa_lookup_enum_by_nr( pname ) );
/* This is incorrect: Need to maintain this data for each of
* GL_TEXTURE_{123}D, GL_TEXTURE_RECTANGLE_NV, etc, and switch
radeon_print(RADEON_TEXTURE | RADEON_STATE, RADEON_VERBOSE,
"%s(%p, tex %p) pname %s\n",
- __FUNCTION__, ctx, texObj,
+ __func__, ctx, texObj,
_mesa_lookup_enum_by_nr( pname ) );
switch ( pname ) {
radeonTexObj* t = radeon_tex_obj(texObj);
radeon_print(RADEON_TEXTURE | RADEON_STATE, RADEON_NORMAL,
- "%s( %p (target = %s) )\n", __FUNCTION__,
+ "%s( %p (target = %s) )\n", __func__,
(void *)texObj,
_mesa_lookup_enum_by_nr(texObj->Target));
radeon_print(RADEON_STATE | RADEON_TEXTURE, RADEON_NORMAL,
"%s(%p) target %s, new texture %p.\n",
- __FUNCTION__, ctx,
+ __func__, ctx,
_mesa_lookup_enum_by_nr(target), t);
_mesa_initialize_texture_object(ctx, &t->base, name, target);
R200_TXA_TFACTOR_SEL_MASK | R200_TXA_TFACTOR1_SEL_MASK);
if ( R200_DEBUG & RADEON_TEXTURE ) {
- fprintf( stderr, "%s( %p, %d )\n", __FUNCTION__, (void *)ctx, unit );
+ fprintf( stderr, "%s( %p, %d )\n", __func__, (void *)ctx, unit );
}
/* Set the texture environment state. Isn't this nice and clean?
(unit * 4));
if (0)
- fprintf(stderr, "%s unit %d\n", __FUNCTION__, unit);
+ fprintf(stderr, "%s unit %d\n", __func__, unit);
if (texUnit->TexGenEnabled & S_BIT) {
mode = texUnit->GenS.Mode;
} else {
_mesa_problem(NULL, "unexpected texture format in %s",
- __FUNCTION__);
+ __func__);
return;
}
}
paramList = mesa_vp->Base.Parameters;
if(paramList->NumParameters > R200_VSF_MAX_PARAM){
- fprintf(stderr, "%s:Params exhausted\n", __FUNCTION__);
+ fprintf(stderr, "%s:Params exhausted\n", __func__);
return GL_FALSE;
}
*fcmd++ = paramList->ParameterValues[pi][3].f;
break;
default:
- _mesa_problem(NULL, "Bad param type in %s", __FUNCTION__);
+ _mesa_problem(NULL, "Bad param type in %s", __func__);
break;
}
if (pi == 95) {
case VARYING_SLOT_PSIZ:
return R200_VSF_OUT_CLASS_RESULT_POINTSIZE;
default:
- fprintf(stderr, "problem in %s, unknown dst output reg %d\n", __FUNCTION__, dst->Index);
+ fprintf(stderr, "problem in %s, unknown dst output reg %d\n", __func__, dst->Index);
exit(0);
return 0;
}
assert (dst->Index == 0);
return R200_VSF_OUT_CLASS_ADDR;
default:
- fprintf(stderr, "problem in %s, unknown register type %d\n", __FUNCTION__, dst->File);
+ fprintf(stderr, "problem in %s, unknown register type %d\n", __func__, dst->File);
exit(0);
return 0;
}
case PROGRAM_ADDRESS:
*/
default:
- fprintf(stderr, "problem in %s", __FUNCTION__);
+ fprintf(stderr, "problem in %s", __func__);
exit(0);
}
}
int i;
if(vp == NULL){
- fprintf(stderr, "vp null in call to %s from %s\n", __FUNCTION__, caller);
+ fprintf(stderr, "vp null in call to %s from %s\n", __func__, caller);
return ;
}
vp->inputs[src->Index] = max_reg+1;*/
- //vp_dump_inputs(vp, __FUNCTION__);
+ //vp_dump_inputs(vp, __func__);
assert(vp->inputs[src->Index] != -1);
return vp->inputs[src->Index];
} else {
case OPCODE_SLT: return R200_VPI_OUT_OP_SLT;
default:
- fprintf(stderr, "%s: Should not be called with opcode %d!", __FUNCTION__, opcode);
+ fprintf(stderr, "%s: Should not be called with opcode %d!", __func__, opcode);
}
exit(-1);
return 0;
/* Flush is needed to make sure that source buffer has correct data */
radeonFlush(ctx);
- rcommonEnsureCmdBufSpace(&r100->radeon, 59, __FUNCTION__);
+ rcommonEnsureCmdBufSpace(&r100->radeon, 59, __func__);
if (!validate_buffers(r100, src_bo, dst_bo))
return GL_FALSE;
* Prepare writing n dwords to the command buffer. Does not cause automatic
* state emits.
*/
-#define BEGIN_BATCH(n) rcommonBeginBatch(b_l_rmesa, n, __FILE__, __FUNCTION__, __LINE__)
+#define BEGIN_BATCH(n) rcommonBeginBatch(b_l_rmesa, n, __FILE__, __func__, __LINE__)
/**
* Write one dword to the command buffer.
int __offset = (offset); \
if (0 && __offset) { \
fprintf(stderr, "(%s:%s:%d) offset : %d\n", \
- __FILE__, __FUNCTION__, __LINE__, __offset); \
+ __FILE__, __func__, __LINE__, __offset); \
} \
radeon_cs_write_dword(b_l_rmesa->cmdbuf.cs, __offset); \
radeon_cs_write_reloc(b_l_rmesa->cmdbuf.cs, \
*/
#define END_BATCH() \
do { \
- radeon_cs_end(b_l_rmesa->cmdbuf.cs, __FILE__, __FUNCTION__, __LINE__);\
+ radeon_cs_end(b_l_rmesa->cmdbuf.cs, __FILE__, __func__, __LINE__);\
} while(0)
/**
ret = drmCommandWriteRead(radeon->dri.fd, DRM_RADEON_GETPARAM,
&gp, sizeof(gp));
if (ret) {
- fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __FUNCTION__,
+ fprintf(stderr, "%s: drmRadeonGetParam: %d\n", __func__,
ret);
exit(1);
}
void radeonDrawBuffer( struct gl_context *ctx, GLenum mode )
{
if (RADEON_DEBUG & RADEON_DRI)
- fprintf(stderr, "%s %s\n", __FUNCTION__,
+ fprintf(stderr, "%s %s\n", __func__,
_mesa_lookup_enum_by_nr( mode ));
if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
void radeonEmitState(radeonContextPtr radeon)
{
- radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __FUNCTION__);
+ radeon_print(RADEON_STATE, RADEON_NORMAL, "%s\n", __func__);
if (radeon->vtbl.pre_emit_state)
radeon->vtbl.pre_emit_state(radeon);
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
if (RADEON_DEBUG & RADEON_IOCTL)
- fprintf(stderr, "%s %d\n", __FUNCTION__, radeon->cmdbuf.cs->cdw);
+ fprintf(stderr, "%s %d\n", __func__, radeon->cmdbuf.cs->cdw);
/* okay if we have no cmds in the buffer &&
we have no DMA flush &&
radeon->dma.flush( ctx );
if (radeon->cmdbuf.cs->cdw)
- rcommonFlushCmdBuf(radeon, __FUNCTION__);
+ rcommonFlushCmdBuf(radeon, __func__);
flush_front:
if (_mesa_is_winsys_fbo(ctx->DrawBuffer) && radeon->front_buffer_dirty) {
rmesa->cmdbuf.flushing = 1;
if (RADEON_DEBUG & RADEON_IOCTL) {
- fprintf(stderr, "%s from %s\n", __FUNCTION__, caller);
+ fprintf(stderr, "%s from %s\n", __func__, caller);
}
radeonEmitQueryEnd(&rmesa->glCtx);
radeon_firevertices(radeon);
if (!is_empty_list(&radeon->dma.reserved)) {
- rcommonFlushCmdBuf( radeon, __FUNCTION__ );
+ rcommonFlushCmdBuf( radeon, __func__ );
}
radeonFreeDmaRegions(radeon);
radeonContextPtr radeon = (radeonContextPtr) driContextPriv->driverPrivate;
if (RADEON_DEBUG & RADEON_DRI)
- fprintf(stderr, "%s ctx %p\n", __FUNCTION__,
+ fprintf(stderr, "%s ctx %p\n", __func__,
&radeon->glCtx);
/* Unset current context and dispath table */
if (!driContextPriv) {
if (RADEON_DEBUG & RADEON_DRI)
- fprintf(stderr, "%s ctx is null\n", __FUNCTION__);
+ fprintf(stderr, "%s ctx is null\n", __func__);
_mesa_make_current(NULL, NULL, NULL);
return GL_TRUE;
}
&(radeon_get_renderbuffer(drfb, BUFFER_DEPTH)->base.Base));
if (RADEON_DEBUG & RADEON_DRI)
- fprintf(stderr, "%s ctx %p dfb %p rfb %p\n", __FUNCTION__, &radeon->glCtx, drfb, readfb);
+ fprintf(stderr, "%s ctx %p dfb %p rfb %p\n", __func__, &radeon->glCtx, drfb, readfb);
if(driDrawPriv)
driUpdateFramebufferSize(&radeon->glCtx, driDrawPriv);
if (RADEON_DEBUG & RADEON_DRI)
- fprintf(stderr, "End %s\n", __FUNCTION__);
+ fprintf(stderr, "End %s\n", __func__);
return GL_TRUE;
}
if(__warn_once){ \
radeon_warning("*********************************WARN_ONCE*********************************\n"); \
radeon_warning("File %s function %s line %d\n", \
- __FILE__, __FUNCTION__, __LINE__); \
+ __FILE__, __func__, __LINE__); \
radeon_warning(__VA_ARGS__);\
radeon_warning("***************************************************************************\n"); \
__warn_once=0;\
if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s count %d stride %d out %p data %p\n",
- __FUNCTION__, count, stride, (void *)out, (void *)data);
+ __func__, count, stride, (void *)out, (void *)data);
if (stride == 4)
COPY_DWORDS(out, data, count);
if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s count %d stride %d out %p data %p\n",
- __FUNCTION__, count, stride, (void *)out, (void *)data);
+ __func__, count, stride, (void *)out, (void *)data);
if (stride == 8)
COPY_DWORDS(out, data, count * 2);
if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s count %d stride %d out %p data %p\n",
- __FUNCTION__, count, stride, (void *)out, (void *)data);
+ __func__, count, stride, (void *)out, (void *)data);
if (stride == 12) {
COPY_DWORDS(out, data, count * 3);
if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s count %d stride %d out %p data %p\n",
- __FUNCTION__, count, stride, (void *)out, (void *)data);
+ __func__, count, stride, (void *)out, (void *)data);
if (stride == 16)
COPY_DWORDS(out, data, count * 4);
if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s count %d stride %d\n",
- __FUNCTION__, count, stride);
+ __func__, count, stride);
if (stride == 0) {
radeonAllocDmaRegion( rmesa, &aos->bo, &aos->offset, size * 4, 32 );
rmesa->dma.minimum_size = (size + 15) & (~15);
radeon_print(RADEON_DMA, RADEON_NORMAL, "%s size %d minimum_size %Zi\n",
- __FUNCTION__, size, rmesa->dma.minimum_size);
+ __func__, size, rmesa->dma.minimum_size);
if (is_empty_list(&rmesa->dma.free)
|| last_elem(&rmesa->dma.free)->bo->size < size) {
RADEON_GEM_DOMAIN_GTT, 0);
if (!dma_bo->bo) {
- rcommonFlushCmdBuf(rmesa, __FUNCTION__);
+ rcommonFlushCmdBuf(rmesa, __func__);
goto again_alloc;
}
insert_at_head(&rmesa->dma.reserved, dma_bo);
int bytes, int alignment)
{
if (RADEON_DEBUG & RADEON_IOCTL)
- fprintf(stderr, "%s %d\n", __FUNCTION__, bytes);
+ fprintf(stderr, "%s %d\n", __func__, bytes);
if (rmesa->dma.flush)
rmesa->dma.flush(&rmesa->glCtx);
struct radeon_dma_bo *dma_bo;
struct radeon_dma_bo *temp;
if (RADEON_DEBUG & RADEON_DMA)
- fprintf(stderr, "%s\n", __FUNCTION__);
+ fprintf(stderr, "%s\n", __func__);
foreach_s(dma_bo, temp, &rmesa->dma.free) {
remove_from_list(dma_bo);
return;
if (RADEON_DEBUG & RADEON_IOCTL)
- fprintf(stderr, "%s %d\n", __FUNCTION__, return_bytes);
+ fprintf(stderr, "%s %d\n", __func__, return_bytes);
rmesa->dma.current_used -= return_bytes;
rmesa->dma.current_vertexptr = rmesa->dma.current_used;
}
++reserved;
fprintf(stderr, "%s: free %zu, wait %zu, reserved %zu, minimum_size: %zu\n",
- __FUNCTION__, free, wait, reserved, rmesa->dma.minimum_size);
+ __func__, free, wait, reserved, rmesa->dma.minimum_size);
}
/* move waiting bos to free list.
struct radeon_dma *dma = &rmesa->dma;
if (RADEON_DEBUG & RADEON_IOCTL)
- fprintf(stderr, "%s\n", __FUNCTION__);
+ fprintf(stderr, "%s\n", __func__);
dma->flush = NULL;
radeon_bo_unmap(rmesa->swtcl.bo);
GLuint bytes = vsize * nverts;
void *head;
if (RADEON_DEBUG & RADEON_IOCTL)
- fprintf(stderr, "%s\n", __FUNCTION__);
+ fprintf(stderr, "%s\n", __func__);
if(is_empty_list(&rmesa->dma.reserved)
||rmesa->dma.current_vertexptr + bytes > first_elem(&rmesa->dma.reserved)->bo->size) {
radeonContextPtr radeon = RADEON_CONTEXT( ctx );
int i;
if (RADEON_DEBUG & RADEON_IOCTL)
- fprintf(stderr, "%s\n", __FUNCTION__);
+ fprintf(stderr, "%s\n", __func__);
if (radeon->dma.flush) {
radeon->dma.flush(&radeon->glCtx);
fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED;
radeon_print(RADEON_TEXTURE, RADEON_TRACE,
"%s: HW doesn't support format %s as output format of attachment %d\n",
- __FUNCTION__, _mesa_get_format_name(mesa_format), i);
+ __func__, _mesa_get_format_name(mesa_format), i);
return;
}
}
int dwords = (rmesa->radeon.cmdbuf.cs->section_ndw - rmesa->radeon.cmdbuf.cs->section_cdw);
if (RADEON_DEBUG & RADEON_IOCTL)
- fprintf(stderr, "%s\n", __FUNCTION__);
+ fprintf(stderr, "%s\n", __func__);
assert( rmesa->radeon.dma.flush == radeonFlushElts );
rmesa->radeon.dma.flush = NULL;
END_BATCH();
if (RADEON_DEBUG & RADEON_SYNC) {
- fprintf(stderr, "%s: Syncing\n", __FUNCTION__);
+ fprintf(stderr, "%s: Syncing\n", __func__);
radeonFinish( &rmesa->radeon.glCtx );
}
BATCH_LOCALS(&rmesa->radeon);
if (RADEON_DEBUG & RADEON_IOCTL)
- fprintf(stderr, "%s %d prim %x\n", __FUNCTION__, min_nr, primitive);
+ fprintf(stderr, "%s %d prim %x\n", __func__, min_nr, primitive);
assert((primitive & RADEON_CP_VC_CNTL_PRIM_WALK_IND));
if (RADEON_DEBUG & RADEON_RENDER)
fprintf(stderr, "%s: header prim %x \n",
- __FUNCTION__, primitive);
+ __func__, primitive);
assert(!rmesa->radeon.dma.flush);
rmesa->radeon.glCtx.Driver.NeedFlush |= FLUSH_STORED_VERTICES;
if (RADEON_DEBUG & (RADEON_PRIMS|RADEON_IOCTL))
fprintf(stderr, "%s: vertex_size 0x%x offset 0x%x \n",
- __FUNCTION__, vertex_size, offset);
+ __func__, vertex_size, offset);
BEGIN_BATCH(7);
OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, 2);
int i;
if (RADEON_DEBUG & RADEON_IOCTL)
- fprintf(stderr, "%s\n", __FUNCTION__);
+ fprintf(stderr, "%s\n", __func__);
BEGIN_BATCH(sz+2+(nr * 2));
OUT_BATCH_PACKET3(RADEON_CP_PACKET3_3D_LOAD_VBPNTR, sz - 1);
if ( swmask ) {
if (RADEON_DEBUG & RADEON_FALLBACKS)
- fprintf(stderr, "%s: swrast clear, mask: %x\n", __FUNCTION__, swmask);
+ fprintf(stderr, "%s: swrast clear, mask: %x\n", __func__, swmask);
_swrast_Clear( ctx, swmask );
}
int i;
if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s count %d stride %d\n",
- __FUNCTION__, count, stride);
+ __func__, count, stride);
for (i = 0; i < count; i++) {
out[0] = *(int *)data;
if (RADEON_DEBUG & RADEON_VERTS)
fprintf(stderr, "%s count %d stride %d\n",
- __FUNCTION__, count, stride);
+ __func__, count, stride);
for (i = 0; i < count; i++) {
out[0] = *(int *)data;
uint32_t *out;
if (RADEON_DEBUG & RADEON_VERTS)
- fprintf(stderr, "%s %d/%d\n", __FUNCTION__, count, size);
+ fprintf(stderr, "%s %d/%d\n", __func__, count, size);
switch (size) {
case 4: emitsize = 3; break;
#if 0
if (RADEON_DEBUG & RADEON_VERTS)
- _tnl_print_vert_flags( __FUNCTION__, inputs );
+ _tnl_print_vert_flags( __func__, inputs );
#endif
if (1) {
union emit_union *v = (union emit_union *)dest;
- radeon_print(RADEON_SWRENDER, RADEON_VERBOSE, "%s\n", __FUNCTION__);
+ radeon_print(RADEON_SWRENDER, RADEON_VERBOSE, "%s\n", __func__);
coord = (GLuint (*)[4])VB->AttribPtr[_TNL_ATTRIB_POS]->data;
coord_stride = VB->AttribPtr[_TNL_ATTRIB_POS]->stride;
radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
"%s: Validating texture %p now, minLod = %d, maxLod = %d\n",
- __FUNCTION__, texObj ,t->minLod, t->maxLod);
+ __func__, texObj ,t->minLod, t->maxLod);
dst_miptree = get_biggest_matching_miptree(t, t->base.BaseLevel, t->base._MaxLevel);
radeon_try_alloc_miptree(rmesa, t);
radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
"%s: No matching miptree found, allocated new one %p\n",
- __FUNCTION__, t->mt);
+ __func__, t->mt);
} else {
radeon_miptree_reference(dst_miptree, &t->mt);
radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
- "%s: Using miptree %p\n", __FUNCTION__, t->mt);
+ "%s: Using miptree %p\n", __func__, t->mt);
}
const unsigned faces = _mesa_num_tex_faces(texObj->Target);
radeon_print(RADEON_STATE, RADEON_VERBOSE,
"%s: query id %d, result %d\n",
- __FUNCTION__, query->Base.Id, (int) query->Base.Result);
+ __func__, query->Base.Id, (int) query->Base.Result);
radeon_bo_map(query->bo, GL_FALSE);
result = query->bo->ptr;
query->Base.Active = GL_FALSE;
query->Base.Ready = GL_TRUE;
- radeon_print(RADEON_STATE, RADEON_VERBOSE,"%s: query id %d\n", __FUNCTION__, query->Base.Id);
+ radeon_print(RADEON_STATE, RADEON_VERBOSE,"%s: query id %d\n", __func__, query->Base.Id);
return &query->Base;
}
{
struct radeon_query_object *query = (struct radeon_query_object *)q;
- radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d\n", __FUNCTION__, q->Id);
+ radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d\n", __func__, q->Id);
if (query->bo) {
radeon_bo_unref(query->bo);
if (radeon_bo_is_referenced_by_cs(query->bo, radeon->cmdbuf.cs))
ctx->Driver.Flush(ctx);
- radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s: query id %d, bo %p, offset %d\n", __FUNCTION__, q->Id, query->bo, query->curr_offset);
+ radeon_print(RADEON_STATE, RADEON_VERBOSE, "%s: query id %d, bo %p, offset %d\n", __func__, q->Id, query->bo, query->curr_offset);
radeonQueryGetResult(ctx, q);
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
struct radeon_query_object *query = (struct radeon_query_object *)q;
- radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d\n", __FUNCTION__, q->Id);
+ radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d\n", __func__, q->Id);
assert(radeon->query.current == NULL);
if (query->emitted_begin == GL_FALSE)
return;
- radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d, bo %p, offset %d\n", __FUNCTION__, query->Base.Id, query->bo, query->curr_offset);
+ radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d, bo %p, offset %d\n", __func__, query->Base.Id, query->bo, query->curr_offset);
radeon_cs_space_check_with_bo(radeon->cmdbuf.cs,
query->bo,
{
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
- radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d\n", __FUNCTION__, q->Id);
+ radeon_print(RADEON_STATE, RADEON_NORMAL, "%s: query id %d\n", __func__, q->Id);
if (radeon->dma.flush)
radeon->dma.flush(&radeon->glCtx);
static void radeonCheckQuery(struct gl_context *ctx, struct gl_query_object *q)
{
- radeon_print(RADEON_STATE, RADEON_TRACE, "%s: query id %d\n", __FUNCTION__, q->Id);
+ radeon_print(RADEON_STATE, RADEON_TRACE, "%s: query id %d\n", __func__, q->Id);
\
#ifdef DRM_RADEON_GEM_BUSY
radeonContextPtr radeon = RADEON_CONTEXT(ctx);
/* Allocate the private area */
screen = calloc(1, sizeof(*screen));
if ( !screen ) {
- fprintf(stderr, "%s: Could not allocate memory for screen structure", __FUNCTION__);
+ fprintf(stderr, "%s: Could not allocate memory for screen structure", __func__);
fprintf(stderr, "leaving here\n");
return NULL;
}
{
struct gl_light *l = &ctx->Light.Light[p];
-/* fprintf(stderr, "%s\n", __FUNCTION__); */
+/* fprintf(stderr, "%s\n", __func__); */
if (l->Enabled) {
r100ContextPtr rmesa = R100_CONTEXT(ctx);
mask &= ~ctx->Light._ColorMaterialBitmask;
if (RADEON_DEBUG & RADEON_STATE)
- fprintf(stderr, "%s\n", __FUNCTION__);
+ fprintf(stderr, "%s\n", __func__);
if (mask & MAT_BIT_FRONT_EMISSION) {
GLuint p, flag;
if ( RADEON_DEBUG & RADEON_STATE )
- fprintf( stderr, "%s( %s = %s )\n", __FUNCTION__,
+ fprintf( stderr, "%s( %s = %s )\n", __func__,
_mesa_lookup_enum_by_nr( cap ),
state ? "GL_TRUE" : "GL_FALSE" );
RADEON_STATECHANGE( rmesa, tcl );
if (RADEON_DEBUG & RADEON_STATE)
- fprintf(stderr, "%s %d BEFORE %x\n", __FUNCTION__, ctx->_NeedEyeCoords,
+ fprintf(stderr, "%s %d BEFORE %x\n", __func__, ctx->_NeedEyeCoords,
rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL]);
if (ctx->_NeedEyeCoords)
}
if (RADEON_DEBUG & RADEON_STATE)
- fprintf(stderr, "%s %d AFTER %x\n", __FUNCTION__, ctx->_NeedEyeCoords,
+ fprintf(stderr, "%s %d AFTER %x\n", __func__, ctx->_NeedEyeCoords,
rmesa->hw.tcl.cmd[TCL_LIGHT_MODEL_CTL]);
}
GLboolean has_material;
if (0)
- fprintf(stderr, "%s, newstate: %x\n", __FUNCTION__, rmesa->radeon.NewGLState);
+ fprintf(stderr, "%s, newstate: %x\n", __func__, rmesa->radeon.NewGLState);
/* Validate state:
*/
extern void radeonFallback( struct gl_context *ctx, GLuint bit, GLboolean mode );
#define FALLBACK( rmesa, bit, mode ) do { \
if ( 0 ) fprintf( stderr, "FALLBACK in %s: #%d=%d\n", \
- __FUNCTION__, bit, mode ); \
+ __func__, bit, mode ); \
radeonFallback( &rmesa->radeon.glCtx, bit, mode ); \
} while (0)
rmesa->radeon.swtcl.vertex_size /= 4;
rmesa->radeon.tnl_index_bitset = index_bitset;
radeon_print(RADEON_SWRENDER, RADEON_VERBOSE,
- "%s: vertex_size= %d floats\n", __FUNCTION__, rmesa->radeon.swtcl.vertex_size);
+ "%s: vertex_size= %d floats\n", __func__, rmesa->radeon.swtcl.vertex_size);
}
}
if (rcommonEnsureCmdBufSpace(&rmesa->radeon,
state_size +
(scissor_size + prims_size + vertex_size),
- __FUNCTION__))
+ __func__))
rmesa->radeon.swtcl.emit_prediction = radeonCountStateEmitSize( &rmesa->radeon );
else
rmesa->radeon.swtcl.emit_prediction = state_size;
space_required += SCISSOR_BUFSZ;
}
/* flush the buffer in case we need more than is left. */
- if (rcommonEnsureCmdBufSpace(&rmesa->radeon, space_required, __FUNCTION__))
+ if (rcommonEnsureCmdBufSpace(&rmesa->radeon, space_required, __func__))
return space_required + radeonCountStateEmitSize( &rmesa->radeon );
else
return space_required + state_size;
// if (rmesa->swtcl.indexed_verts.buf)
// radeonReleaseDmaRegion( rmesa, &rmesa->swtcl.indexed_verts,
- // __FUNCTION__ );
+ // __func__ );
if (RADEON_DEBUG & RADEON_FALLBACKS)
fprintf(stderr, "Radeon end tcl fallback\n");
is_clamp_to_border = GL_TRUE;
break;
default:
- _mesa_problem(NULL, "bad S wrap mode in %s", __FUNCTION__);
+ _mesa_problem(NULL, "bad S wrap mode in %s", __func__);
}
if (t->base.Target != GL_TEXTURE_1D) {
is_clamp_to_border = GL_TRUE;
break;
default:
- _mesa_problem(NULL, "bad T wrap mode in %s", __FUNCTION__);
+ _mesa_problem(NULL, "bad T wrap mode in %s", __func__);
}
}
if ( RADEON_DEBUG & RADEON_STATE ) {
fprintf( stderr, "%s( %s )\n",
- __FUNCTION__, _mesa_lookup_enum_by_nr( pname ) );
+ __func__, _mesa_lookup_enum_by_nr( pname ) );
}
switch ( pname ) {
{
radeonTexObj* t = radeon_tex_obj(texObj);
- radeon_print(RADEON_TEXTURE, RADEON_VERBOSE, "%s( %s )\n", __FUNCTION__,
+ radeon_print(RADEON_TEXTURE, RADEON_VERBOSE, "%s( %s )\n", __func__,
_mesa_lookup_enum_by_nr( pname ) );
switch ( pname ) {
int i;
radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
- "%s( %p (target = %s) )\n", __FUNCTION__, (void *)texObj,
+ "%s( %p (target = %s) )\n", __func__, (void *)texObj,
_mesa_lookup_enum_by_nr( texObj->Target ) );
if ( rmesa ) {
if (0) {
fprintf(stderr, "%s: copying to face %d, level %d\n",
- __FUNCTION__, face, level);
+ __func__, face, level);
fprintf(stderr, "to: x %d, y %d, offset %d\n", dstx, dsty, (uint32_t) dst_offset);
fprintf(stderr, "from (%dx%d) width %d, height %d, offset %d, pitch %d\n",
x, y, rrb->base.Base.Width, rrb->base.Base.Height, (uint32_t) src_offset, rrb->pitch/rrb->cpp);
if ( RADEON_DEBUG & RADEON_TEXTURE ) {
- fprintf( stderr, "%s( %p, %d )\n", __FUNCTION__, (void *)ctx, unit );
+ fprintf( stderr, "%s( %p, %d )\n", __func__, (void *)ctx, unit );
}
/* Set the texture environment state. Isn't this nice and clean?
t->pp_txfilter |= table[ firstImage->TexFormat ].filter;
} else {
_mesa_problem(NULL, "unexpected texture format in %s",
- __FUNCTION__);
+ __func__);
return GL_FALSE;
}
}
radeon_print(RADEON_TEXTURE, RADEON_NORMAL,
"%s: texObj %p, texImage %p, "
"texObj miptree doesn't match, allocated new miptree %p\n",
- __FUNCTION__, texObj, texImage, t->mt);
+ __func__, texObj, texImage, t->mt);
}
 /* Miptree allocation may have failed,