This uses a stamp mechanism to mark the DRI drawable as invalid.
Instead of immediately updating the buffers we just bump the drawable
stamp and call out to DRI2GetBuffers "later".
"Later" used to be at LOCK_HARDWARE time, and this patch brings back
callouts at the points where we used to call LOCK_HARDWARE. A new function,
intel_prepare_render(), is called where we used to call LOCK_HARDWARE,
and if the buffers are invalid, we call out to DRI2GetBuffers there.
This lets us invalidate buffers only when notified instead of on
every glViewport() call. If the loader calls the DRI invalidate
entrypoint, we disable viewport-triggered buffer invalidation.
Additionally, we can clean up the old viewport mechanism a bit,
since we can just invalidate the buffers and not worry about
reentrancy and whatnot.
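
As a rough illustration of the handshake (pared-down types and names,
not the real __DRIdrawable/__DRIcontext definitions): the loader-side
invalidation only records that the buffers changed, and the driver
notices the bumped stamp the next time it prepares to render.

    /* Hypothetical, simplified sketch of the stamp mechanism. */
    struct drawable { int stamp; };
    struct context { int draw_stamp; struct drawable *draw; };

    /* Called when the loader reports the drawable's buffers changed. */
    static void invalidate(struct drawable *d)
    {
       d->stamp++;
    }

    /* Called by the driver right before it starts rendering. */
    static void prepare_render(struct context *c)
    {
       if (c->draw->stamp != c->draw_stamp) {
          /* ask the loader for new buffers (DRI2GetBuffers) here */
          c->draw_stamp = c->draw->stamp;
       }
    }

The real driver-side check is intel_prepare_render() in the diff below.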
pcp->driDrawablePriv = NULL;
pcp->loaderPrivate = data;
+ pcp->dri2.draw_stamp = 0;
+ pcp->dri2.read_stamp = 0;
/* When the first context is created for a screen, initialize a "dummy"
* context.
*/
 * The loader's private context data.  This structure is opaque.
*/
void *loaderPrivate;
+
+ struct {
+ int draw_stamp;
+ int read_stamp;
+ } dri2;
};
/**
* so can't access it earlier.
*/
+ intel_prepare_render(intel);
+
for (i = 0; i < nr_prims; i++) {
uint32_t hw_prim;
break;
} while (pass < 2);
+ intel_prepare_render(intel);
+
if (pass >= 2) {
dri_bo_map(dst_buffer, GL_TRUE);
dri_bo_map(src_buffer, GL_FALSE);
GLuint buf;
all = (cw == fb->Width && ch == fb->Height);
+ intel_prepare_render(intel);
+
/* Loop over all renderbuffers */
for (buf = 0; buf < BUFFER_COUNT && mask; buf++) {
const GLbitfield bufBit = 1 << buf;
intel->is_front_buffer_rendering = (mode == GL_FRONT_LEFT)
|| (mode == GL_FRONT);
- /* If we weren't front-buffer rendering before but we are now, make sure
- * that the front-buffer has actually been allocated.
+ /* If we weren't front-buffer rendering before but we are now,
+ * invalidate our DRI drawable so we'll ask for new buffers
+ * (including the fake front) before we start rendering again.
*/
- if (!was_front_buffer_rendering && intel->is_front_buffer_rendering) {
- intel_update_renderbuffers(intel->driContext,
- intel->driContext->driDrawablePriv);
- }
+ if (!was_front_buffer_rendering && intel->is_front_buffer_rendering)
+ dri2InvalidateDrawable(intel->driContext->driDrawablePriv);
}
intel_draw_buffer(ctx, ctx->DrawBuffer);
intel->is_front_buffer_reading = (mode == GL_FRONT_LEFT)
|| (mode == GL_FRONT);
- /* If we weren't front-buffer reading before but we are now, make sure
- * that the front-buffer has actually been allocated.
+ /* If we weren't front-buffer reading before but we are now,
+ * invalidate our DRI drawable so we'll ask for new buffers
+ * (including the fake front) before we start reading again.
*/
- if (!was_front_buffer_reading && intel->is_front_buffer_reading) {
- intel_update_renderbuffers(intel->driContext,
- intel->driContext->driDrawablePriv);
- }
+ if (!was_front_buffer_reading && intel->is_front_buffer_reading)
+ dri2InvalidateDrawable(intel->driContext->driReadablePriv);
}
if (ctx->ReadBuffer == ctx->DrawBuffer) {
unsigned int attachments[10];
const char *region_name;
+ /* If we're rendering to the fake front buffer, make sure all the
+ * pending drawing has landed on the real front buffer. Otherwise
+ * when we eventually get to DRI2GetBuffersWithFormat the stale
+ * real front buffer contents will get copied to the new fake front
+ * buffer.
+ */
+ if (intel->is_front_buffer_rendering)
+ intel_flush(&intel->ctx, GL_FALSE);
+
if (INTEL_DEBUG & DEBUG_DRI)
fprintf(stderr, "enter %s, drawable %p\n", __func__, drawable);
drawable->lastStamp = drawable->dri2.stamp;
}
+void
+intel_prepare_render(struct intel_context *intel)
+{
+ __DRIcontext *driContext = intel->driContext;
+ __DRIdrawable *drawable;
+
+ drawable = intel->driDrawable;
+ if (drawable->dri2.stamp != driContext->dri2.draw_stamp) {
+ if (drawable->lastStamp != drawable->dri2.stamp)
+ intel_update_renderbuffers(driContext, drawable);
+ intel_draw_buffer(&intel->ctx, intel->ctx.DrawBuffer);
+ driContext->dri2.draw_stamp = drawable->dri2.stamp;
+ }
+
+ drawable = intel->driReadDrawable;
+ if (drawable->dri2.stamp != driContext->dri2.read_stamp) {
+ if (drawable->lastStamp != drawable->dri2.stamp)
+ intel_update_renderbuffers(driContext, drawable);
+ driContext->dri2.read_stamp = drawable->dri2.stamp;
+ }
+}
+
void
intel_viewport(GLcontext *ctx, GLint x, GLint y, GLsizei w, GLsizei h)
{
struct intel_context *intel = intel_context(ctx);
__DRIcontext *driContext = intel->driContext;
- void (*old_viewport)(GLcontext *ctx, GLint x, GLint y,
- GLsizei w, GLsizei h);
-
- if (!intel->meta.internal_viewport_call && ctx->DrawBuffer->Name == 0) {
- /* If we're rendering to the fake front buffer, make sure all the pending
- * drawing has landed on the real front buffer. Otherwise when we
- * eventually get to DRI2GetBuffersWithFormat the stale real front
- * buffer contents will get copied to the new fake front buffer.
- */
- if (intel->is_front_buffer_rendering) {
- intel_flush(ctx, GL_FALSE);
- }
- intel_update_renderbuffers(driContext, driContext->driDrawablePriv);
- if (driContext->driDrawablePriv != driContext->driReadablePriv)
- intel_update_renderbuffers(driContext, driContext->driReadablePriv);
+ if (!intel->using_dri2_swapbuffers &&
+ !intel->meta.internal_viewport_call && ctx->DrawBuffer->Name == 0) {
+ dri2InvalidateDrawable(driContext->driDrawablePriv);
+ dri2InvalidateDrawable(driContext->driReadablePriv);
}
-
- old_viewport = ctx->Driver.Viewport;
- ctx->Driver.Viewport = NULL;
- intel->driDrawable = driContext->driDrawablePriv;
- intel_draw_buffer(ctx, intel->ctx.DrawBuffer);
- ctx->Driver.Viewport = old_viewport;
}
-
static const struct dri_debug_control debug_control[] = {
{ "tex", DEBUG_TEXTURE},
{ "state", DEBUG_STATE},
if (driContextPriv) {
struct gl_framebuffer *fb = driDrawPriv->driverPrivate;
struct gl_framebuffer *readFb = driReadPriv->driverPrivate;
-
- intel_update_renderbuffers(driContextPriv, driDrawPriv);
- if (driDrawPriv != driReadPriv)
- intel_update_renderbuffers(driContextPriv, driReadPriv);
-
- /* set GLframebuffer size to match window, if needed */
- driUpdateFramebufferSize(&intel->ctx, driDrawPriv);
-
- if (driReadPriv != driDrawPriv) {
- driUpdateFramebufferSize(&intel->ctx, driReadPriv);
- }
_mesa_make_current(&intel->ctx, fb, readFb);
intel->driReadDrawable = driReadPriv;
intel->driDrawable = driDrawPriv;
- intel_draw_buffer(&intel->ctx, fb);
+ driContextPriv->dri2.draw_stamp = driDrawPriv->dri2.stamp - 1;
+ driContextPriv->dri2.read_stamp = driReadPriv->dri2.stamp - 1;
}
else {
_mesa_make_current(NULL, NULL, NULL);
void intel_update_renderbuffers(__DRIcontext *context,
__DRIdrawable *drawable);
+void intel_prepare_render(struct intel_context *intel);
void i915_set_buf_info_for_region(uint32_t *state, struct intel_region *region,
uint32_t buffer_id);
if (!intel_check_blit_fragment_ops(ctx, tmpColor[3] == 1.0F))
return GL_FALSE;
+ intel_prepare_render(intel);
+
/* Clip to buffer bounds and scissor. */
if (!_mesa_clip_to_region(fb->_Xmin, fb->_Ymin,
fb->_Xmax, fb->_Ymax,
intelFlush(&intel->ctx);
+ intel_prepare_render(intel);
+
/* XXX: We fail to handle different inversion between read and draw framebuffer. */
/* Clip to destination buffer. */
return GL_TRUE;
}
+ intel_prepare_render(intel);
+
all = (width * height * src->cpp == dst->Base.Size &&
x == 0 && dst_offset == 0);
fprintf(stderr, "%s\n", __FUNCTION__);
intelFlush(ctx);
+ intel_prepare_render(intel_context(ctx));
if (do_blit_readpixels
(ctx, x, y, width, height, format, type, pack, pixels))
intel_region_cow(intel, dst);
}
+ intel_prepare_render(intel);
+
_mesa_copy_rect(intel_region_map(intel, dst) + dst_offset,
dst->cpp,
dst->pitch,
/* Now blit from the texture buffer to the new buffer:
*/
+ intel_prepare_render(intel);
ok = intelEmitCopyBlit(intel,
region->cpp,
region->pitch, pbo->buffer, 0, region->tiling,
intel->using_dri2_swapbuffers = GL_TRUE;
dri2InvalidateDrawable(drawable);
- intel_update_renderbuffers(intel->driContext, drawable);
}
static const struct __DRI2flushExtensionRec intelFlushExtension = {
GLuint i;
intelFlush(&intel->ctx);
+ intel_prepare_render(intel);
for (i = 0; i < ctx->Const.MaxTextureImageUnits; i++) {
if (ctx->Texture.Unit[i]._ReallyEnabled) {
}
/* intelFlush(ctx); */
+ intel_prepare_render(intel);
{
drm_intel_bo *dst_bo = intel_region_buffer(intel,
intelImage->mt->region,
if (drm_intel_bo_references(intel->batch->buf, dst_buffer))
intelFlush(&intel->ctx);
+ intel_prepare_render(intel);
{
dri_bo *src_buffer = intel_bufferobj_buffer(intel, pbo, INTEL_READ);
pixels, unpack, "glTexImage");
}
+ intel_prepare_render(intel);
+
if (intelImage->mt) {
if (pixels != NULL) {
/* Flush any queued rendering with the texture before mapping. */
if (!intelObj)
return;
- if (dPriv->lastStamp != *dPriv->pStamp)
+ if (dPriv->lastStamp != dPriv->dri2.stamp)
intel_update_renderbuffers(pDRICtx, dPriv);
rb = intel_get_renderbuffer(fb, BUFFER_FRONT_LEFT);
if (!pixels)
return;
+ intel_prepare_render(intel);
+
/* Map buffer if necessary. Need to lock to prevent other contexts
* from uploading the buffer under us.
*/