{
BATCH_LOCALS;
- BEGIN_BATCH(40, 0);
+ BEGIN_BATCH(40, IGNORE_CLIPRECTS);
OUT_BATCH(_3DSTATE_DFLT_DIFFUSE_CMD);
OUT_BATCH(0);
#define emit( intel, state, size ) \
-do { \
- int k; \
- BEGIN_BATCH(size / sizeof(GLuint), 0); \
- for (k = 0 ; k < size / sizeof(GLuint) ; k++) { \
- if (0) _mesa_printf(" 0x%08x\n", state[k]); \
- OUT_BATCH(state[k]); \
- } \
- ADVANCE_BATCH(); \
-} while (0)
+ intel_batchbuffer_data(intel->batch, state, size, IGNORE_CLIPRECTS )
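/*
 * (The open-coded dword loop becomes one bulk copy; roughly,
 *  emit(intel, state->Ctx, sizeof(state->Ctx)) now expands to
 *  intel_batchbuffer_data(intel->batch, state->Ctx, sizeof(state->Ctx),
 *  IGNORE_CLIPRECTS). These state packets carry no drawing-rectangle
 *  references, so IGNORE_CLIPRECTS is always safe for them.)
 */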
static GLuint
get_dirty(struct i830_hw_state *state)
if (dirty & I830_UPLOAD_CTX) {
DBG("I830_UPLOAD_CTX:\n");
- emit(i830, state->Ctx, sizeof(state->Ctx));
+ emit(intel, state->Ctx, sizeof(state->Ctx));
}
if (dirty & I830_UPLOAD_BUFFERS) {
DBG("I830_UPLOAD_BUFFERS:\n");
- BEGIN_BATCH(I830_DEST_SETUP_SIZE + 2, 0);
+ BEGIN_BATCH(I830_DEST_SETUP_SIZE + 2, IGNORE_CLIPRECTS);
OUT_BATCH(state->Buffer[I830_DESTREG_CBUFADDR0]);
OUT_BATCH(state->Buffer[I830_DESTREG_CBUFADDR1]);
OUT_RELOC(state->draw_region->buffer,
if (dirty & I830_UPLOAD_STIPPLE) {
DBG("I830_UPLOAD_STIPPLE:\n");
- emit(i830, state->Stipple, sizeof(state->Stipple));
+ emit(intel, state->Stipple, sizeof(state->Stipple));
}
for (i = 0; i < I830_TEX_UNITS; i++) {
if ((dirty & I830_UPLOAD_TEX(i))) {
DBG("I830_UPLOAD_TEX(%d):\n", i);
- BEGIN_BATCH(I830_TEX_SETUP_SIZE + 1, 0);
+ BEGIN_BATCH(I830_TEX_SETUP_SIZE + 1, IGNORE_CLIPRECTS);
OUT_BATCH(state->Tex[i][I830_TEXREG_TM0LI]);
if (state->tex_buffer[i]) {
if (dirty & I830_UPLOAD_TEXBLEND(i)) {
DBG("I830_UPLOAD_TEXBLEND(%d): %d words\n", i,
state->TexBlendWordsUsed[i]);
- emit(i830, state->TexBlend[i], state->TexBlendWordsUsed[i] * 4);
+ emit(intel, state->TexBlend[i], state->TexBlendWordsUsed[i] * 4);
}
}
{
BATCH_LOCALS;
- BEGIN_BATCH(200, 0);
+ BEGIN_BATCH(200, IGNORE_CLIPRECTS);
OUT_BATCH(_3DSTATE_AA_CMD |
AA_LINE_ECAAR_WIDTH_ENABLE |
#define emit(intel, state, size ) \
- intel_batchbuffer_data(intel->batch, state, size, 0 )
+ intel_batchbuffer_data(intel->batch, state, size, IGNORE_CLIPRECTS )
static GLuint
get_dirty(struct i915_hw_state *state)
if (dirty & I915_UPLOAD_BUFFERS) {
if (INTEL_DEBUG & DEBUG_STATE)
fprintf(stderr, "I915_UPLOAD_BUFFERS:\n");
- BEGIN_BATCH(I915_DEST_SETUP_SIZE + 2, 0);
+ BEGIN_BATCH(I915_DEST_SETUP_SIZE + 2, IGNORE_CLIPRECTS);
OUT_BATCH(state->Buffer[I915_DESTREG_CBUFADDR0]);
OUT_BATCH(state->Buffer[I915_DESTREG_CBUFADDR1]);
OUT_RELOC(state->draw_region->buffer,
if (dirty & I915_UPLOAD_TEX(i))
nr++;
- BEGIN_BATCH(2 + nr * 3, 0);
+ BEGIN_BATCH(2 + nr * 3, IGNORE_CLIPRECTS);
OUT_BATCH(_3DSTATE_MAP_STATE | (3 * nr));
OUT_BATCH((dirty & I915_UPLOAD_TEX_ALL) >> I915_UPLOAD_TEX_0_SHIFT);
for (i = 0; i < I915_TEX_UNITS; i++)
}
ADVANCE_BATCH();
- BEGIN_BATCH(2 + nr * 3, 0);
+ BEGIN_BATCH(2 + nr * 3, IGNORE_CLIPRECTS);
OUT_BATCH(_3DSTATE_SAMPLER_STATE | (3 * nr));
OUT_BATCH((dirty & I915_UPLOAD_TEX_ALL) >> I915_UPLOAD_TEX_0_SHIFT);
for (i = 0; i < I915_TEX_UNITS; i++)
if (INTEL_DEBUG & DEBUG_LOCK)
_mesa_printf("%s - unlocked\n", __progname);
-}
+
+ /**
+ * Nothing that references cliprects should be left in the batch outside
+ * of LOCK/UNLOCK.
+ */
+ assert(intel->batch->cliprect_mode != REFERENCES_CLIPRECTS);
+}
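/*
 * (The same assertion lands in both unlock paths; see the DRM_UNLOCK hunk
 *  below. REFERENCES_CLIPRECTS contents bake in cliprects that are only
 *  valid while the hardware lock is held, so such batches must be flushed
 *  before unlocking.)
 */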
fprintf(stderr, "%s %s\n", __FUNCTION__, _mesa_lookup_enum_by_nr(prim));
INTEL_FIREVERTICES(intel);
intel->vtbl.reduced_primitive_state(intel, reduced_prim[prim]);
- intelStartInlinePrimitive(intel, hw_prim[prim], INTEL_BATCH_CLIPRECTS);
+ intelStartInlinePrimitive(intel, hw_prim[prim], LOOP_CLIPRECTS);
}
/* _mesa_printf("%s *", __progname); */
- intel_wait_flips(intel, batch_flags);
+ intel_wait_flips(intel);
/* Emit a slot which will be filled with the inline primitive
* command later.
intelWrapInlinePrimitive(struct intel_context *intel)
{
GLuint prim = intel->prim.primitive;
- GLuint cliprects_enable = intel->batch->cliprects_enable;
+ enum cliprect_mode cliprect_mode = intel->batch->cliprect_mode;
intel_flush_inline_primitive(intel);
intel_batchbuffer_flush(intel->batch);
- intelStartInlinePrimitive(intel, prim, cliprects_enable); /* ??? */
+ intelStartInlinePrimitive(intel, prim, cliprect_mode); /* ??? */
}
GLuint *
if (hwprim != intel->prim.primitive) {
INTEL_FIREVERTICES(intel);
- intelStartInlinePrimitive(intel, hwprim, INTEL_BATCH_CLIPRECTS);
+ intelStartInlinePrimitive(intel, hwprim, LOOP_CLIPRECTS);
}
}
if (!was_locked)
LOCK_HARDWARE(intel);
- /* All 3d primitives should be emitted with INTEL_BATCH_CLIPRECTS,
+ /* All 3d primitives should be emitted with LOOP_CLIPRECTS,
* otherwise the drawing origin (DR4) might not be set correctly.
*/
- intelStartInlinePrimitive(intel, PRIM3D_TRIFAN, INTEL_BATCH_CLIPRECTS);
+ intelStartInlinePrimitive(intel, PRIM3D_TRIFAN, LOOP_CLIPRECTS);
vb = (union fi *) intelExtendInlinePrimitive(intel, n * 6);
for (i = 0; i < n; i++) {
void brwUpdateTextureState( struct intel_context *intel );
void brw_FrameBufferTexInit( struct brw_context *brw );
void brw_FrameBufferTexDestroy( struct brw_context *brw );
+void brw_validate_textures( struct brw_context *brw );
/*======================================================================
* brw_metaops.c
brw->curbe.tracked_state.dirty.mesa |= fp->param_state;
if (sz == 0) {
- BEGIN_BATCH(2, INTEL_BATCH_NO_CLIPRECTS);
+ BEGIN_BATCH(2, IGNORE_CLIPRECTS);
OUT_BATCH((CMD_CONST_BUFFER << 16) | (2 - 2));
OUT_BATCH(0);
ADVANCE_BATCH();
* flushes as necessary when doublebuffering of CURBEs isn't
* possible.
*/
- BEGIN_BATCH(2, INTEL_BATCH_NO_CLIPRECTS);
+ BEGIN_BATCH(2, IGNORE_CLIPRECTS);
OUT_BATCH((CMD_CONST_BUFFER << 16) | (1 << 8) | (2 - 2));
OUT_RELOC(brw->curbe.curbe_bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
(sz - 1));
if (prim_packet.verts_per_instance) {
intel_batchbuffer_data( brw->intel.batch, &prim_packet,
- sizeof(prim_packet), INTEL_BATCH_CLIPRECTS);
+ sizeof(prim_packet), LOOP_CLIPRECTS);
}
}
if (ctx->NewState)
_mesa_update_state( ctx );
+ brw_validate_textures( brw );
+
/* Bind all inputs, derive varying and size information:
*/
brw_merge_inputs( brw, arrays );
vbp.header.bits.length = (1 + nr_enabled * 4) - 2;
vbp.header.bits.opcode = CMD_VERTEX_BUFFER;
- BEGIN_BATCH(vbp.header.bits.length+2, 0);
+ BEGIN_BATCH(vbp.header.bits.length+2, IGNORE_CLIPRECTS);
OUT_BATCH( vbp.header.dword );
for (i = 0; i < nr_enabled; i++) {
ib.header.bits.cut_index_enable = 0;
- BEGIN_BATCH(4, 0);
+ BEGIN_BATCH(4, IGNORE_CLIPRECTS);
OUT_BATCH( ib.header.dword );
OUT_RELOC( buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, offset);
OUT_RELOC( buffer, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
{
struct intel_context *intel = &brw->intel;
- BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
+ BEGIN_BATCH(6, IGNORE_CLIPRECTS);
OUT_BATCH(CMD_BINDING_TABLE_PTRS << 16 | (6 - 2));
OUT_BATCH(0); /* vs */
OUT_BATCH(0); /* gs */
{
struct intel_context *intel = &brw->intel;
- BEGIN_BATCH(7, INTEL_BATCH_NO_CLIPRECTS);
+ BEGIN_BATCH(7, IGNORE_CLIPRECTS);
OUT_BATCH(CMD_PIPELINED_STATE_POINTERS << 16 | (7 - 2));
OUT_RELOC(brw->vs.state_bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, 0);
if (brw->gs.prog_active)
struct intel_region *region = brw->state.depth_region;
if (region == NULL) {
- BEGIN_BATCH(5, INTEL_BATCH_NO_CLIPRECTS);
+ BEGIN_BATCH(5, IGNORE_CLIPRECTS);
OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (5 - 2));
OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
(BRW_SURFACE_NULL << 29));
return;
}
- BEGIN_BATCH(5, INTEL_BATCH_NO_CLIPRECTS);
+ BEGIN_BATCH(5, IGNORE_CLIPRECTS);
OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (5 - 2));
OUT_BATCH(((region->pitch * region->cpp) - 1) |
(format << 18) |
/* Output the structure (brw_state_base_address) directly to the
* batchbuffer, so we can emit relocations inline.
*/
- BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
+ BEGIN_BATCH(6, IGNORE_CLIPRECTS);
OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
OUT_BATCH(1); /* General state base address */
OUT_BATCH(1); /* Surface state base address */
/***********************************************************************
* brw_state_batch.c
*/
-#define BRW_BATCH_STRUCT(brw, s) intel_batchbuffer_data( brw->intel.batch, (s), sizeof(*(s)), 0)
+#define BRW_BATCH_STRUCT(brw, s) intel_batchbuffer_data( brw->intel.batch, (s), sizeof(*(s)), IGNORE_CLIPRECTS)
#define BRW_CACHED_BATCH_STRUCT(brw, s) brw_cached_batch_struct( brw, (s), sizeof(*(s)) )
GLboolean brw_cached_batch_struct( struct brw_context *brw,
struct header *newheader = (struct header *)data;
if (brw->emit_state_always) {
- intel_batchbuffer_data(brw->intel.batch, data, sz, 0);
+ intel_batchbuffer_data(brw->intel.batch, data, sz, IGNORE_CLIPRECTS);
return GL_TRUE;
}
emit:
memcpy(item->header, newheader, sz);
- intel_batchbuffer_data(brw->intel.batch, data, sz, 0);
+ intel_batchbuffer_data(brw->intel.batch, data, sz, IGNORE_CLIPRECTS);
return GL_TRUE;
}
#include "intel_context.h"
#include "intel_ioctl.h"
#include "intel_regions.h"
+#include "intel_tex.h"
#include "brw_context.h"
#include "brw_defines.h"
brw->intel.ctx.Driver.DeleteTexture( &brw->intel.ctx,
brw->intel.frame_buffer_texobj );
}
+
+/**
+ * Finalizes all textures, completing any rendering that needs to be done
+ * to prepare them.
+ */
+void brw_validate_textures( struct brw_context *brw )
+{
+ struct intel_context *intel = &brw->intel;
+ int i;
+
+ for (i = 0; i < BRW_MAX_TEX_UNIT; i++) {
+ struct gl_texture_unit *texUnit = &brw->attribs.Texture->Unit[i];
+
+ if (texUnit->_ReallyEnabled) {
+ intel_finalize_mipmap_tree(intel, i);
+ }
+ }
+}
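/*
 * (This is the helper reached through the brw_validate_textures( brw ) call
 *  added to the draw path above; it finalizes mipmap trees before the
 *  surface-state update below starts relying on them.)
 */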
/* _NEW_TEXTURE, BRW_NEW_TEXDATA
*/
- if (texUnit->_ReallyEnabled &&
- intel_finalize_mipmap_tree(intel, i))
- {
+ if (texUnit->_ReallyEnabled) {
brw_update_texture_surface(ctx, i);
brw->wm.nr_surfaces = i+2;
}
DRM_UNLOCK(intel->driFd, intel->driHwLock, intel->hHWContext);
_glthread_UNLOCK_MUTEX(lockMutex);
+
+ /**
+ * Nothing that references cliprects should be left in the batch outside
+ * of LOCK/UNLOCK.
+ */
+ assert(intel->batch->cliprect_mode != REFERENCES_CLIPRECTS);
}
batch->ptr = batch->map;
batch->dirty_state = ~0;
batch->id = batch->intel->batch_id++;
- batch->cliprects_enable = INTEL_BATCH_NO_CLIPRECTS;
+ batch->cliprect_mode = IGNORE_CLIPRECTS;
}
struct intel_batchbuffer *
*/
if (!(intel->numClipRects == 0 &&
- batch->cliprects_enable == INTEL_BATCH_CLIPRECTS)) {
+ batch->cliprect_mode == LOOP_CLIPRECTS)) {
if (intel->ttm == GL_TRUE) {
intel_exec_ioctl(batch->intel,
used,
- batch->cliprects_enable == INTEL_BATCH_NO_CLIPRECTS,
+ batch->cliprect_mode != LOOP_CLIPRECTS,
allow_unlock,
start, count, &batch->last_fence);
} else {
intel_batch_ioctl(batch->intel,
batch->buf->offset,
used,
- batch->cliprects_enable == INTEL_BATCH_NO_CLIPRECTS,
+ batch->cliprect_mode != LOOP_CLIPRECTS,
allow_unlock);
}
}
dri_post_submit(batch->buf, &batch->last_fence);
if (intel->numClipRects == 0 &&
- batch->cliprects_enable == INTEL_BATCH_CLIPRECTS) {
+ batch->cliprect_mode == LOOP_CLIPRECTS) {
if (allow_unlock) {
/* If we are not doing any actual user-visible rendering,
* do a sched_yield to keep the app from pegging the cpu while
void
intel_batchbuffer_data(struct intel_batchbuffer *batch,
const void *data, GLuint bytes,
- enum cliprects_enable cliprects_enable)
+ enum cliprect_mode cliprect_mode)
{
assert((bytes & 3) == 0);
- intel_batchbuffer_require_space(batch, bytes, cliprects_enable);
+ intel_batchbuffer_require_space(batch, bytes, cliprect_mode);
__memcpy(batch->ptr, data, bytes);
batch->ptr += bytes;
}
#define BATCH_SZ 16384
#define BATCH_RESERVED 16
-enum cliprects_enable {
- INTEL_BATCH_CLIPRECTS = 0,
- INTEL_BATCH_NO_CLIPRECTS = 1
+enum cliprect_mode {
+ /**
+ * Batchbuffer contents may be looped over per cliprect, but do not
+ * require it.
+ */
+ IGNORE_CLIPRECTS,
+ /**
+ * Batchbuffer contents require looping over per cliprect at batch submit
+ * time.
+ */
+ LOOP_CLIPRECTS,
+ /**
+ * Batchbuffer contents contain drawing that should not be executed multiple
+ * times.
+ */
+ NO_LOOP_CLIPRECTS,
+ /**
+ * Batchbuffer contents contain drawing that already handles cliprects, such
+ * as 2D drawing to front/back/depth that doesn't respect DRAWING_RECTANGLE.
+ * Behaves like NO_LOOP_CLIPRECTS, but must not remain in the batch outside
+ * of LOCK/UNLOCK, since the cliprects it references are only valid while
+ * the lock is held.
+ */
+ REFERENCES_CLIPRECTS
};
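/*
 * A rough map of how the hunks in this patch choose a mode (a sketch, not
 * an exhaustive rule):
 *
 *   BEGIN_BATCH(n, IGNORE_CLIPRECTS);      state packets, relocations
 *   BEGIN_BATCH(n, LOOP_CLIPRECTS);        3D primitives, so the drawing
 *                                          origin (DR4) is re-emitted per
 *                                          cliprect
 *   BEGIN_BATCH(n, NO_LOOP_CLIPRECTS);     blits with pre-clipped
 *                                          coordinates; run exactly once
 *   BEGIN_BATCH(n, REFERENCES_CLIPRECTS);  blits that read the cliprects
 *                                          directly; must be flushed before
 *                                          UNLOCK_HARDWARE
 */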
struct intel_batchbuffer
GLubyte *map;
GLubyte *ptr;
- enum cliprects_enable cliprects_enable;
+ enum cliprect_mode cliprect_mode;
GLuint size;
*/
void intel_batchbuffer_data(struct intel_batchbuffer *batch,
const void *data, GLuint bytes,
- enum cliprects_enable cliprects_enable);
+ enum cliprect_mode cliprect_mode);
void intel_batchbuffer_release_space(struct intel_batchbuffer *batch,
GLuint bytes);
static INLINE void
intel_batchbuffer_require_space(struct intel_batchbuffer *batch,
GLuint sz,
- enum cliprects_enable cliprects_enable)
+ enum cliprect_mode cliprect_mode)
{
assert(sz < batch->size - 8);
if (intel_batchbuffer_space(batch) < sz)
intel_batchbuffer_flush(batch);
- /* Upgrade the buffer to being looped over per cliprect if this batch
- * emit needs it. The code used to emit a batch whenever the
- * cliprects_enable was changed, but reducing the overhead of frequent
- * batch flushing is more important than reducing state parsing,
- * particularly as we move towards private backbuffers and number
- * cliprects always being 1 except at swap.
- */
- if (cliprects_enable == INTEL_BATCH_CLIPRECTS)
- batch->cliprects_enable = INTEL_BATCH_CLIPRECTS;
+ if (cliprect_mode != IGNORE_CLIPRECTS) {
+ if (batch->cliprect_mode == IGNORE_CLIPRECTS) {
+ batch->cliprect_mode = cliprect_mode;
+ } else {
+ if (batch->cliprect_mode != cliprect_mode) {
+ /* Conflicting modes: flush, then tag the fresh batch (the flush
+ * resets cliprect_mode to IGNORE_CLIPRECTS).
+ */
+ intel_batchbuffer_flush(batch);
+ batch->cliprect_mode = cliprect_mode;
+ }
+ }
+ }
}
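/*
 * Worked example of the merge rule: IGNORE_CLIPRECTS emits never change the
 * batch's mode; a LOOP_CLIPRECTS primitive upgrades an IGNORE_CLIPRECTS
 * batch to looping; a NO_LOOP_CLIPRECTS blit into that looped batch
 * conflicts, so the batch is flushed and the new one starts in
 * NO_LOOP_CLIPRECTS.
 */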
/* Here are the crusty old macros, to be removed:
*/
#define BATCH_LOCALS
-#define BEGIN_BATCH(n, cliprects_enable) do { \
- intel_batchbuffer_require_space(intel->batch, (n)*4, cliprects_enable); \
+#define BEGIN_BATCH(n, cliprect_mode) do { \
+ intel_batchbuffer_require_space(intel->batch, (n)*4, cliprect_mode); \
} while (0)
#define OUT_BATCH(d) intel_batchbuffer_emit_dword(intel->batch, d)
-#define OUT_RELOC(buf, cliprects_enable, delta) do { \
+#define OUT_RELOC(buf, flags, delta) do { \
assert((delta) >= 0); \
- intel_batchbuffer_emit_reloc(intel->batch, buf, cliprects_enable, delta); \
+ intel_batchbuffer_emit_reloc(intel->batch, buf, flags, delta); \
} while (0)
#define ADVANCE_BATCH() do { } while(0)
src_x = box.x1 - dPriv->x + dPriv->backX;
src_y = box.y1 - dPriv->y + dPriv->backY;
- BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
+ BEGIN_BATCH(8, REFERENCES_CLIPRECTS);
OUT_BATCH(CMD);
OUT_BATCH(BR13 | dst_pitch);
OUT_BATCH((box.y1 << 16) | box.x1);
assert(w > 0);
assert(h > 0);
- BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
+ BEGIN_BATCH(6, NO_LOOP_CLIPRECTS);
OUT_BATCH(CMD);
OUT_BATCH(BR13 | dst_pitch);
OUT_BATCH((y << 16) | x);
assert(dst_x < dst_x2);
assert(dst_y < dst_y2);
- BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
+ BEGIN_BATCH(8, NO_LOOP_CLIPRECTS);
OUT_BATCH(CMD);
OUT_BATCH(BR13 | dst_pitch);
OUT_BATCH((dst_y << 16) | dst_x);
assert(dst_x < dst_x2);
assert(h > 0);
- BEGIN_BATCH(8, INTEL_BATCH_NO_CLIPRECTS);
+ BEGIN_BATCH(8, NO_LOOP_CLIPRECTS);
OUT_BATCH(CMD);
OUT_BATCH(BR13 | dst_pitch);
OUT_BATCH((0 << 16) | dst_x);
_mesa_debug(ctx, "hardware blit clear buf %d rb id %d\n",
buf, irb->Base.Name);
*/
- intel_wait_flips(intel, INTEL_BATCH_NO_CLIPRECTS);
+ intel_wait_flips(intel);
assert(b.x1 < b.x2);
assert(b.y1 < b.y2);
- BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
+ BEGIN_BATCH(6, REFERENCES_CLIPRECTS);
OUT_BATCH(CMD);
OUT_BATCH(BR13);
OUT_BATCH((b.y1 << 16) | b.x1);
(8 * 4) +
(3 * 4) +
dwords,
- INTEL_BATCH_NO_CLIPRECTS );
+ NO_LOOP_CLIPRECTS );
opcode = XY_SETUP_BLT_CMD;
if (cpp == 4)
if (dst_tiled)
blit_cmd |= XY_DST_TILED;
- BEGIN_BATCH(8 + 3, INTEL_BATCH_NO_CLIPRECTS);
+ BEGIN_BATCH(8 + 3, NO_LOOP_CLIPRECTS);
OUT_BATCH(opcode);
OUT_BATCH(br13);
OUT_BATCH((0 << 16) | 0); /* clip x1, y1 */
intel_batchbuffer_data( intel->batch,
src_bits,
dwords * 4,
- INTEL_BATCH_NO_CLIPRECTS );
+ NO_LOOP_CLIPRECTS );
}
intel->vtbl.meta_color_mask(intel, GL_TRUE);
intel->vtbl.meta_draw_region(intel, irbColor->region, NULL);
- /* XXX: Using INTEL_BATCH_NO_CLIPRECTS here is dangerous as the
- * drawing origin may not be correctly emitted.
- */
intel->vtbl.meta_draw_quad(intel,
fb->_Xmin,
fb->_Xmax,
/* Emit wait for pending flips */
void
-intel_wait_flips(struct intel_context *intel, GLuint batch_flags)
+intel_wait_flips(struct intel_context *intel)
{
struct intel_framebuffer *intel_fb =
(struct intel_framebuffer *) intel->ctx.DrawBuffer;
BATCH_LOCALS;
/* Wait for pending flips to take effect */
- BEGIN_BATCH(2, batch_flags);
+ BEGIN_BATCH(2, NO_LOOP_CLIPRECTS);
OUT_BATCH(pf_planes & 0x1 ? (MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP)
: 0);
OUT_BATCH(pf_planes & 0x2 ? (MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_B_FLIP)
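/*
 * (intel_wait_flips no longer takes caller-supplied batch flags: the wait
 *  is always emitted NO_LOOP_CLIPRECTS, since an MI_WAIT_FOR_EVENT should
 *  be executed exactly once rather than once per cliprect.)
 */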
+
/**************************************************************************
*
* Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
extern struct intel_region *intel_drawbuf_region(struct intel_context *intel);
-extern void intel_wait_flips(struct intel_context *intel, GLuint batch_flags);
+extern void intel_wait_flips(struct intel_context *intel);
extern void intelSwapBuffers(__DRIdrawablePrivate * dPriv);