cc.cc2.depth_write_enable = ctx->Depth.Mask;
}
- if (intel->stats_wm || (INTEL_DEBUG & DEBUG_STATS))
+ if (intel->stats_wm || unlikely(INTEL_DEBUG & DEBUG_STATS))
cc.cc5.statistics_enable = 1;
/* CACHE_NEW_CC_VP */
*/
program = brw_get_program(&c.func, &program_size);
- if (INTEL_DEBUG & DEBUG_CLIP) {
+ if (unlikely(INTEL_DEBUG & DEBUG_CLIP)) {
printf("clip:\n");
for (i = 0; i < program_size / sizeof(struct brw_instruction); i++)
brw_disasm(stdout, &((struct brw_instruction *)program)[i],
intel->gen);
printf("\n");
- }
+ }
/* Upload
*/
clip.thread4.max_threads = 1 - 1;
}
- if (INTEL_DEBUG & DEBUG_SINGLE_THREAD)
+ if (unlikely(INTEL_DEBUG & DEBUG_SINGLE_THREAD))
clip.thread4.max_threads = 0;
- if (INTEL_DEBUG & DEBUG_STATS)
+ if (unlikely(INTEL_DEBUG & DEBUG_STATS))
clip.thread4.stats_enable = 1;
clip.clip5.userclip_enable_flags = 0x7f;
#include "intel_batchbuffer.h"
-#define FILE_DEBUG_FLAG DEBUG_BATCH
+#define FILE_DEBUG_FLAG DEBUG_PRIMS
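+/* DBG() prints only when INTEL_DEBUG contains this file's flag, so the
+ * primitive printouts below can use it instead of open-coded checks.
+ */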
static GLuint prim_to_hw_prim[GL_POLYGON+1] = {
_3DPRIM_POINTLIST,
struct gl_context *ctx = &brw->intel.ctx;
GLenum mode = prim->mode;
- if (INTEL_DEBUG & DEBUG_PRIMS)
- printf("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));
+ DBG("PRIM: %s\n", _mesa_lookup_enum_by_nr(prim->mode));
/* Slight optimization to avoid the GS program when not needed:
*/
struct brw_3d_primitive prim_packet;
struct intel_context *intel = &brw->intel;
- if (INTEL_DEBUG & DEBUG_PRIMS)
- printf("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
- prim->start, prim->count);
+ DBG("PRIM: %s %d %d\n", _mesa_lookup_enum_by_nr(prim->mode),
+ prim->start, prim->count);
prim_packet.header.opcode = CMD_3D_PRIM;
prim_packet.header.length = sizeof(prim_packet)/4 - 2;
static GLuint get_surface_type( GLenum type, GLuint size,
GLenum format, GLboolean normalized )
{
- if (INTEL_DEBUG & DEBUG_VERTS)
+ if (unlikely(INTEL_DEBUG & DEBUG_VERTS))
printf("type %s size %d normalized %d\n",
_mesa_lookup_enum_by_nr(type), size, normalized);
const char *last_annotation_string = NULL;
ir_instruction *last_annotation_ir = NULL;
- if (INTEL_DEBUG & DEBUG_WM) {
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
printf("Native code for fragment shader %d:\n",
ctx->Shader.CurrentFragmentProgram->Name);
}
fs_inst *inst = (fs_inst *)iter.get();
struct brw_reg src[3], dst;
- if (INTEL_DEBUG & DEBUG_WM) {
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
if (last_annotation_ir != inst->ir) {
last_annotation_ir = inst->ir;
if (last_annotation_ir) {
this->fail = true;
}
- if (INTEL_DEBUG & DEBUG_WM) {
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
for (unsigned int i = last_native_inst; i < p->nr_insn; i++) {
if (0) {
printf("0x%08x 0x%08x 0x%08x 0x%08x ",
*/
c->dispatch_width = 8;
- if (INTEL_DEBUG & DEBUG_WM) {
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
printf("GLSL IR for native fragment shader %d:\n", prog->Name);
_mesa_print_ir(shader->ir, NULL);
printf("\n");
*/
program = brw_get_program(&c.func, &program_size);
- if (INTEL_DEBUG & DEBUG_GS) {
- int i;
+ if (unlikely(INTEL_DEBUG & DEBUG_GS)) {
+ int i;
printf("gs:\n");
for (i = 0; i < program_size / sizeof(struct brw_instruction); i++)
if (intel->gen == 5)
gs.thread4.rendering_enable = 1;
- if (INTEL_DEBUG & DEBUG_STATS)
+ if (unlikely(INTEL_DEBUG & DEBUG_STATS))
gs.thread4.stats_enable = 1;
bo = brw_upload_cache(&brw->cache, BRW_GS_UNIT,
memset(&vfs, 0, sizeof(vfs));
vfs.opcode = brw->CMD_VF_STATISTICS;
- if (INTEL_DEBUG & DEBUG_STATS)
+ if (unlikely(INTEL_DEBUG & DEBUG_STATS))
vfs.statistics_enable = 1;
BRW_BATCH_STRUCT(brw, &vfs);
*/
program = brw_get_program(&c.func, &program_size);
- if (INTEL_DEBUG & DEBUG_SF) {
+ if (unlikely(INTEL_DEBUG & DEBUG_SF)) {
printf("sf:\n");
for (i = 0; i < program_size / sizeof(struct brw_instruction); i++)
brw_disasm(stdout, &((struct brw_instruction *)program)[i],
sf.thread4.max_threads = MIN2(chipset_max_threads, key->nr_urb_entries) - 1;
- if (INTEL_DEBUG & DEBUG_SINGLE_THREAD)
+ if (unlikely(INTEL_DEBUG & DEBUG_SINGLE_THREAD))
sf.thread4.max_threads = 0;
- if (INTEL_DEBUG & DEBUG_STATS)
+ if (unlikely(INTEL_DEBUG & DEBUG_STATS))
sf.thread4.stats_enable = 1;
/* CACHE_NEW_SF_VP */
#include "intel_batchbuffer.h"
#include "brw_wm.h"
+#define FILE_DEBUG_FLAG DEBUG_STATE
static GLuint
hash_key(struct brw_cache_item *item)
*(void **)aux_return = (void *)((char *)item->key + item->key_size);
}
- if (INTEL_DEBUG & DEBUG_STATE)
- printf("upload %s: %d bytes to cache id %d\n",
- cache->name[cache_id],
- data_size, cache_id);
+ DBG("upload %s: %d bytes to cache id %d\n",
+ cache->name[cache_id],
+ data_size, cache_id);
/* Copy data to the buffer */
drm_intel_bo_subdata(bo, 0, data_size, data);
struct brw_cache_item *c, *next;
GLuint i;
- if (INTEL_DEBUG & DEBUG_STATE)
- printf("%s\n", __FUNCTION__);
+ DBG("%s\n", __FUNCTION__);
for (i = 0; i < cache->size; i++) {
for (c = cache->items[i]; c; c = next) {
void
brw_state_cache_check_size(struct brw_context *brw)
{
- if (INTEL_DEBUG & DEBUG_STATE)
- printf("%s (n_items=%d)\n", __FUNCTION__, brw->cache.n_items);
+ DBG("%s (n_items=%d)\n", __FUNCTION__, brw->cache.n_items);
/* un-tuned guess. Each object is generally a page, so 1000 of them is 4 MB of
* state cache.
{
GLuint i;
- if (INTEL_DEBUG & DEBUG_STATE)
- printf("%s\n", __FUNCTION__);
+ DBG("%s\n", __FUNCTION__);
brw_clear_cache(brw, cache);
for (i = 0; i < BRW_MAX_CACHE; i++) {
brw_clear_validated_bos(brw);
- if (INTEL_DEBUG) {
+ if (unlikely(INTEL_DEBUG)) {
/* Debug version which enforces various sanity checks on the
* state flags which are generated and checked to help ensure
* state atoms are ordered correctly in the list.
}
}
- if (INTEL_DEBUG & DEBUG_STATE) {
+ if (unlikely(INTEL_DEBUG & DEBUG_STATE)) {
brw_update_dirty_count(mesa_bits, state->mesa);
brw_update_dirty_count(brw_bits, state->brw);
brw_update_dirty_count(cache_bits, state->cache);
exit(1);
}
- if (INTEL_DEBUG & (DEBUG_URB|DEBUG_FALLBACKS))
+ if (unlikely(INTEL_DEBUG & (DEBUG_URB|DEBUG_FALLBACKS)))
printf("URB CONSTRAINED\n");
}
done:
- if (INTEL_DEBUG & DEBUG_URB)
+ if (unlikely(INTEL_DEBUG & DEBUG_URB))
printf("URB fence: %d ..VS.. %d ..GS.. %d ..CLP.. %d ..SF.. %d ..CS.. %d\n",
brw->urb.vs_start,
brw->urb.gs_start,
c->prog_data.total_grf = reg;
- if (INTEL_DEBUG & DEBUG_VS) {
+ if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
printf("%s NumAddrRegs %d\n", __FUNCTION__, c->vp->program.Base.NumAddressRegs);
printf("%s NumTemps %d\n", __FUNCTION__, c->vp->program.Base.NumTemporaries);
printf("%s reg = %d\n", __FUNCTION__, reg);
GLuint index;
GLuint file;
- if (INTEL_DEBUG & DEBUG_VS) {
+ if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
printf("vs-mesa:\n");
_mesa_fprint_program_opt(stdout, &c->vp->program.Base, PROG_PRINT_DEBUG,
GL_TRUE);
brw_optimize(p);
- if (INTEL_DEBUG & DEBUG_VS) {
+ if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
int i;
printf("vs-native:\n");
*/
vs.vs5.sampler_count = 0;
- if (INTEL_DEBUG & DEBUG_STATS)
+ if (unlikely(INTEL_DEBUG & DEBUG_STATS))
vs.thread4.stats_enable = 1;
/* Vertex program always enabled:
c->prog_data.total_scratch = 0;
}
- if (INTEL_DEBUG & DEBUG_WM)
+ if (unlikely(INTEL_DEBUG & DEBUG_WM))
fprintf(stderr, "\n");
/* get the program
brw_remove_grf_to_mrf_moves(p);
}
- if (INTEL_DEBUG & DEBUG_WM) {
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
int i;
printf("wm-native:\n");
struct brw_fragment_program *fp = c->fp;
GLuint insn;
- if (INTEL_DEBUG & DEBUG_WM) {
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
printf("pre-fp:\n");
_mesa_fprint_program_opt(stdout, &fp->program.Base, PROG_PRINT_DEBUG,
GL_TRUE);
}
}
- if (INTEL_DEBUG & DEBUG_WM) {
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
printf("pass_fp:\n");
print_insns( c->prog_instructions, c->nr_fp_insns );
printf("\n");
{
int i;
- if (INTEL_DEBUG & DEBUG_GLSL_FORCE)
+ if (unlikely(INTEL_DEBUG & DEBUG_GLSL_FORCE))
return GL_TRUE;
for (i = 0; i < fp->Base.NumInstructions; i++) {
}
post_wm_emit(c);
- if (INTEL_DEBUG & DEBUG_WM) {
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
printf("wm-native:\n");
for (i = 0; i < p->nr_insn; i++)
brw_disasm(stdout, &p->store[i], intel->gen);
*/
void brw_wm_glsl_emit(struct brw_context *brw, struct brw_wm_compile *c)
{
- if (INTEL_DEBUG & DEBUG_WM) {
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
printf("brw_wm_glsl_emit:\n");
}
/* actual code generation */
brw_wm_emit_glsl(brw, c);
- if (INTEL_DEBUG & DEBUG_WM) {
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
brw_wm_print_program(c, "brw_wm_glsl_emit done");
}
}
}
- if (INTEL_DEBUG & DEBUG_WM) {
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
brw_wm_print_program(c, "pass0");
}
}
track_arg(c, inst, 2, read2);
}
- if (INTEL_DEBUG & DEBUG_WM) {
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
brw_wm_print_program(c, "pass1");
}
}
}
}
- if (INTEL_DEBUG & DEBUG_WM) {
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
brw_wm_print_program(c, "pass2");
}
c->state = PASS2_DONE;
- if (INTEL_DEBUG & DEBUG_WM) {
+ if (unlikely(INTEL_DEBUG & DEBUG_WM)) {
brw_wm_print_program(c, "pass2/done");
}
}
wm.wm5.line_stipple = key->line_stipple;
- if (INTEL_DEBUG & DEBUG_STATS || key->stats_wm)
+ if (unlikely(INTEL_DEBUG & DEBUG_STATS) || key->stats_wm)
wm.wm4.stats_enable = 1;
bo = brw_upload_cache(&brw->cache, BRW_WM_UNIT,
(x_off & 0xffff) | (y_off << 16));
}
- if (INTEL_DEBUG & DEBUG_BATCH) {
+ if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
drm_intel_bo_map(batch->buf, GL_FALSE);
intel_decode(batch->buf->virtual, used / 4, batch->buf->offset,
intel->intelScreen->deviceID, GL_TRUE);
if (used == 0)
return;
- if (INTEL_DEBUG & DEBUG_BATCH)
+ if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
fprintf(stderr, "%s:%d: Batchbuffer flush with %db used\n", file, line,
used);
do_flush_locked(batch, used);
- if (INTEL_DEBUG & DEBUG_SYNC) {
+ if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
fprintf(stderr, "waiting for idle\n");
drm_intel_bo_map(batch->buf, GL_TRUE);
drm_intel_bo_unmap(batch->buf);
[BUFFER_COLOR7] = "color7",
};
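+/* Shared debug printout for the blit/tri/swrast clear paths: lists the
+ * buffers a given path is about to clear.
+ */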
+static void
+debug_mask(const char *name, GLbitfield mask)
+{
+ GLuint i;
+
+ if (unlikely(INTEL_DEBUG & DEBUG_BLIT)) {
+ DBG("%s clear:", name);
+ for (i = 0; i < BUFFER_COUNT; i++) {
+ if (mask & (1 << i))
+ DBG(" %s", buffer_names[i]);
+ }
+ DBG("\n");
+ }
+}
+
/**
* Called by ctx->Driver.Clear.
*/
GLbitfield blit_mask = 0;
GLbitfield swrast_mask = 0;
struct gl_framebuffer *fb = ctx->DrawBuffer;
- GLuint i;
if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
intel->front_buffer_dirty = GL_TRUE;
}
if (blit_mask) {
- if (INTEL_DEBUG & DEBUG_BLIT) {
- DBG("blit clear:");
- for (i = 0; i < BUFFER_COUNT; i++) {
- if (blit_mask & (1 << i))
- DBG(" %s", buffer_names[i]);
- }
- DBG("\n");
- }
+ debug_mask("blit", blit_mask);
intelClearWithBlit(ctx, blit_mask);
}
if (tri_mask) {
- if (INTEL_DEBUG & DEBUG_BLIT) {
- DBG("tri clear:");
- for (i = 0; i < BUFFER_COUNT; i++) {
- if (tri_mask & (1 << i))
- DBG(" %s", buffer_names[i]);
- }
- DBG("\n");
- }
-
+ debug_mask("tri", tri_mask);
_mesa_meta_Clear(&intel->ctx, tri_mask);
}
if (swrast_mask) {
- if (INTEL_DEBUG & DEBUG_BLIT) {
- DBG("swrast clear:");
- for (i = 0; i < BUFFER_COUNT; i++) {
- if (swrast_mask & (1 << i))
- DBG(" %s", buffer_names[i]);
- }
- DBG("\n");
- }
+ debug_mask("swrast", swrast_mask);
_swrast_Clear(ctx, swrast_mask);
}
}
* thus ignore the invalidate. */
drawable->lastStamp = drawable->dri2.stamp;
- if (INTEL_DEBUG & DEBUG_DRI)
+ if (unlikely(INTEL_DEBUG & DEBUG_DRI))
fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
screen = intel->intelScreen->driScrnPriv;
if (rb->region && rb->region->name == buffers[i].name)
continue;
- if (INTEL_DEBUG & DEBUG_DRI)
+ if (unlikely(INTEL_DEBUG & DEBUG_DRI))
fprintf(stderr,
"attaching buffer %d, at %d, cpp %d, pitch %d\n",
buffers[i].name, buffers[i].attachment,
buffers[i].cpp, buffers[i].pitch);
if (buffers[i].attachment == __DRI_BUFFER_STENCIL && depth_region) {
- if (INTEL_DEBUG & DEBUG_DRI)
+ if (unlikely(INTEL_DEBUG & DEBUG_DRI))
fprintf(stderr, "(reusing depth buffer as stencil)\n");
intel_region_reference(®ion, depth_region);
}
#define INTEL_MAX_FIXUP 64
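+/* Branch-prediction hints for the debug checks: INTEL_DEBUG is almost
+ * always zero, so tell GCC via __builtin_expect that these branches are
+ * rarely taken.  Non-GNU compilers get a no-op fallback, and behavior
+ * is unchanged either way; only code layout is affected.
+ */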
+#ifndef likely
+#ifdef __GNUC__
+#define likely(expr) (__builtin_expect(expr, 1))
+#define unlikely(expr) (__builtin_expect(expr, 0))
+#else
+#define likely(expr) (expr)
+#define unlikely(expr) (expr)
+#endif
+#endif
+
struct intel_sync_object {
struct gl_sync_object Base;
#define DEBUG_CLIP 0x8000000
#define DBG(...) do { \
- if (INTEL_DEBUG & FILE_DEBUG_FLAG) \
+ if (unlikely(INTEL_DEBUG & FILE_DEBUG_FLAG)) \
printf(__VA_ARGS__); \
} while(0)
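+/* Like DBG(), but keyed to DEBUG_FALLBACKS rather than the per-file
+ * flag, for reporting falls back to the software rasterizer.
+ */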
+#define fallback_debug(...) do { \
+ if (unlikely(INTEL_DEBUG & DEBUG_FALLBACKS)) \
+ printf(__VA_ARGS__); \
+} while(0)
+
#define PCI_CHIP_845_G 0x2562
#define PCI_CHIP_I830_M 0x3577
#define PCI_CHIP_I855_GM 0x3582
return GL_TRUE;
}
- if (INTEL_DEBUG & DEBUG_PIXEL)
- fprintf(stderr, "%s: bad format for blit (cpp %d, type %s format %s)\n",
- __FUNCTION__, region->cpp,
- _mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
+ DBG("%s: bad format for blit (cpp %d, type %s format %s)\n",
+ __FUNCTION__, region->cpp,
+ _mesa_lookup_enum_by_nr(type), _mesa_lookup_enum_by_nr(format));
return GL_FALSE;
}
GLint incr;
GLuint count = 0;
- if (INTEL_DEBUG & DEBUG_PIXEL)
- printf("%s %d,%d %dx%d bitmap %dx%d skip %d src_offset %d mask %d\n",
- __FUNCTION__, x,y,w,h,width,height,unpack->SkipPixels, src_offset, mask);
+ DBG("%s %d,%d %dx%d bitmap %dx%d skip %d src_offset %d mask %d\n",
+ __FUNCTION__, x,y,w,h,width,height,unpack->SkipPixels, src_offset, mask);
if (invert) {
first = h-1;
}
out:
- if (INTEL_DEBUG & DEBUG_SYNC)
+ if (unlikely(INTEL_DEBUG & DEBUG_SYNC))
intel_batchbuffer_flush(intel->batch);
if (_mesa_is_bufferobj(unpack->BufferObj)) {
return GL_TRUE;
}
+
/* There are a large number of possible ways to implement bitmap on
* this hardware, most of them have some sort of drawback. Here are a
* few that spring to mind:
GLboolean flip = GL_FALSE;
if (type == GL_DEPTH || type == GL_STENCIL) {
- if (INTEL_DEBUG & DEBUG_FALLBACKS)
- fprintf(stderr, "glCopyPixels() fallback: GL_DEPTH || GL_STENCIL\n");
+ fallback_debug("glCopyPixels() fallback: GL_DEPTH || GL_STENCIL\n");
return GL_FALSE;
}
GLsizei width, GLsizei height,
GLint destx, GLint desty, GLenum type)
{
- if (INTEL_DEBUG & DEBUG_PIXEL)
- fprintf(stderr, "%s\n", __FUNCTION__);
+ DBG("%s\n", __FUNCTION__);
if (do_blit_copypixels(ctx, srcx, srcy, width, height, destx, desty, type))
return;
#include "intel_pixel.h"
#include "intel_buffer_objects.h"
+#define FILE_DEBUG_FLAG DEBUG_PIXEL
+
/* For many applications, the new ability to pull the source buffers
* back out of the GTT and then do the packing/conversion operations
* in software will be as much of an improvement as trying to get the
GLboolean all;
GLint dst_x, dst_y;
- if (INTEL_DEBUG & DEBUG_PIXEL)
- printf("%s\n", __FUNCTION__);
+ DBG("%s\n", __FUNCTION__);
if (!src)
return GL_FALSE;
if (!_mesa_is_bufferobj(pack->BufferObj)) {
/* PBO only for now:
*/
- if (INTEL_DEBUG & DEBUG_PIXEL)
- printf("%s - not PBO\n", __FUNCTION__);
+ DBG("%s - not PBO\n", __FUNCTION__);
return GL_FALSE;
}
if (ctx->_ImageTransferState ||
!intel_check_blit_format(src, format, type)) {
- if (INTEL_DEBUG & DEBUG_PIXEL)
- printf("%s - bad format for blit\n", __FUNCTION__);
+ DBG("%s - bad format for blit\n", __FUNCTION__);
return GL_FALSE;
}
if (pack->Alignment != 1 || pack->SwapBytes || pack->LsbFirst) {
- if (INTEL_DEBUG & DEBUG_PIXEL)
- printf("%s: bad packing params\n", __FUNCTION__);
+ DBG("%s: bad packing params\n", __FUNCTION__);
return GL_FALSE;
}
rowLength = width;
if (pack->Invert) {
- if (INTEL_DEBUG & DEBUG_PIXEL)
- printf("%s: MESA_PACK_INVERT not done yet\n", __FUNCTION__);
+ DBG("%s: MESA_PACK_INVERT not done yet\n", __FUNCTION__);
return GL_FALSE;
}
else {
return GL_FALSE;
}
- if (INTEL_DEBUG & DEBUG_PIXEL)
- printf("%s - DONE\n", __FUNCTION__);
+ DBG("%s - DONE\n", __FUNCTION__);
return GL_TRUE;
}
struct intel_context *intel = intel_context(ctx);
GLboolean dirty;
- if (INTEL_DEBUG & DEBUG_PIXEL)
- fprintf(stderr, "%s\n", __FUNCTION__);
+ DBG("%s\n", __FUNCTION__);
intel_flush(ctx);
(ctx, x, y, width, height, format, type, pack, pixels))
return;
- if (INTEL_DEBUG & DEBUG_PIXEL)
- printf("%s: fallback to swrast\n", __FUNCTION__);
+ fallback_debug("%s: fallback to swrast\n", __FUNCTION__);
/* Update Mesa state before calling down into _swrast_ReadPixels, as
* the spans code requires the computed buffer states to be up to date,
struct intel_context *intel = intel_context(ctx);
struct intel_texture_object *intelObj = intel_texture_object(texObj);
- if (INTEL_DEBUG & DEBUG_FALLBACKS)
- fprintf(stderr, "%s - fallback to swrast\n", __FUNCTION__);
+ fallback_debug("%s - fallback to swrast\n", __FUNCTION__);
intel_tex_map_level_images(intel, intelObj, texObj->BaseLevel);
_mesa_generate_mipmap(ctx, target, texObj);
const struct intel_region *src = get_teximage_source(intel, internalFormat);
if (!intelImage->mt || !src || !src->buffer) {
- if (INTEL_DEBUG & DEBUG_FALLBACKS)
+ if (unlikely(INTEL_DEBUG & DEBUG_FALLBACKS))
fprintf(stderr, "%s fail %p %p (0x%08x)\n",
__FUNCTION__, intelImage->mt, src, internalFormat);
return GL_FALSE;
}
if (intelImage->mt->cpp != src->cpp) {
- if (INTEL_DEBUG & DEBUG_FALLBACKS)
- fprintf(stderr, "%s fail %d vs %d cpp\n",
- __FUNCTION__, intelImage->mt->cpp, src->cpp);
+ fallback_debug("%s fail %d vs %d cpp\n",
+ __FUNCTION__, intelImage->mt->cpp, src->cpp);
return GL_FALSE;
}
return;
fail:
- if (INTEL_DEBUG & DEBUG_FALLBACKS)
- fprintf(stderr, "%s - fallback to swrast\n", __FUNCTION__);
+ fallback_debug("%s - fallback to swrast\n", __FUNCTION__);
_mesa_meta_CopyTexImage1D(ctx, target, level, internalFormat, x, y,
width, border);
}
return;
fail:
- if (INTEL_DEBUG & DEBUG_FALLBACKS)
- fprintf(stderr, "%s - fallback to swrast\n", __FUNCTION__);
+ fallback_debug("%s - fallback to swrast\n", __FUNCTION__);
_mesa_meta_CopyTexImage2D(ctx, target, level, internalFormat, x, y,
width, height, border);
}
if (!do_copy_texsubimage(intel_context(ctx), target,
intel_texture_image(texImage),
internalFormat, xoffset, 0, x, y, width, 1)) {
- if (INTEL_DEBUG & DEBUG_FALLBACKS)
- fprintf(stderr, "%s - fallback to swrast\n", __FUNCTION__);
+ fallback_debug("%s - fallback to swrast\n", __FUNCTION__);
_mesa_meta_CopyTexSubImage1D(ctx, target, level, xoffset, x, y, width);
}
}
internalFormat,
xoffset, yoffset, x, y, width, height)) {
- if (INTEL_DEBUG & DEBUG_FALLBACKS)
- fprintf(stderr, "%s - fallback to swrast\n", __FUNCTION__);
+ fallback_debug("%s - fallback to swrast\n", __FUNCTION__);
_mesa_meta_CopyTexSubImage2D(ctx, target, level,
xoffset, yoffset, x, y, width, height);
}