#include "main/enums.h"
#include "main/imports.h"
#include "main/macros.h"
-#include "main/mfeatures.h"
#include "main/mtypes.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tex.h"
-#include "intel_span.h"
#ifndef I915
#include "brw_context.h"
#endif
/** Called by gl_renderbuffer::Delete() */
static void
-intel_delete_renderbuffer(struct gl_renderbuffer *rb)
+intel_delete_renderbuffer(struct gl_context *ctx, struct gl_renderbuffer *rb)
{
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
intel_miptree_release(&irb->mt);
- free(irb);
+ /* _mesa_delete_renderbuffer() frees rb itself, replacing the old explicit
+  * free(irb). NOTE(review): this presumes rb is embedded in irb (as
+  * irb->Base.Base, per the move_to_temp code below) so the two pointers
+  * refer to the same allocation — confirm against struct intel_renderbuffer.
+  */
+ _mesa_delete_renderbuffer(ctx, rb);
}
/**
return;
}
- /* We sometimes get called with this by our intel_span.c usage. */
- if (!irb->mt) {
- *out_map = NULL;
- *out_stride = 0;
- return;
- }
+ intel_prepare_render(intel);
/* For a window-system renderbuffer, we need to flip the mapping we receive
* upside-down. So we need to ask for a rectangle on flipped vertically, and
return 0;
return 0;
default:
- /* MSAA unsupported. However, a careful reading of
- * EXT_framebuffer_multisample reveals that we need to permit
- * num_samples to be 1 (since num_samples is permitted to be as high as
- * GL_MAX_SAMPLES, and GL_MAX_SAMPLES must be at least 1). Since
- * platforms before Gen6 don't support MSAA, this is safe, because
- * multisampling won't happen anyhow.
- */
- if (num_samples > 0)
- return 1;
+ /* MSAA unsupported. */
return 0;
}
}
* Called via glRenderbufferStorageEXT() to set the format and allocate
* storage for a user-created renderbuffer.
*/
-GLboolean
+static GLboolean
intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
GLenum internalFormat,
GLuint width, GLuint height)
* except they're less useful because you can't texture with
* them.
*/
- rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, internalFormat,
+ rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, GL_TEXTURE_2D,
+ internalFormat,
GL_NONE, GL_NONE);
break;
case GL_STENCIL_INDEX:
if (!irb->mt)
return false;
- if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
- bool ok = intel_miptree_alloc_hiz(intel, irb->mt, rb->NumSamples);
- if (!ok) {
- intel_miptree_release(&irb->mt);
- return false;
- }
- }
-
- if (irb->mt->msaa_layout == INTEL_MSAA_LAYOUT_CMS) {
- bool ok = intel_miptree_alloc_mcs(intel, irb->mt, rb->NumSamples);
- if (!ok) {
- intel_miptree_release(&irb->mt);
- return false;
- }
- }
-
return true;
}
-#if FEATURE_OES_EGL_image
static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
struct gl_renderbuffer *rb,
irb = intel_renderbuffer(rb);
intel_miptree_release(&irb->mt);
- irb->mt = intel_miptree_create_for_region(intel,
- GL_TEXTURE_2D,
- image->format,
- image->region);
+ irb->mt = intel_miptree_create_for_bo(intel,
+ image->region->bo,
+ image->format,
+ image->offset,
+ image->region->width,
+ image->region->height,
+ image->region->pitch,
+ image->region->tiling);
if (!irb->mt)
return;
rb->Format = image->format;
rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
image->internal_format);
+ rb->NeedsFinishRenderTexture = true;
}
-#endif
/**
* Called for each hardware renderbuffer when a _window_ is resized.
intel_draw_buffer(ctx);
}
-/**
- * \par Special case for separate stencil
- *
- * When wrapping a depthstencil texture that uses separate stencil, this
- * function is recursively called twice: once to create \c
- * irb->wrapped_depth and again to create \c irb->wrapped_stencil. On the
- * call to create \c irb->wrapped_depth, the \c format and \c
- * internal_format parameters do not match \c mt->format. In that case, \c
- * mt->format is MESA_FORMAT_S8_Z24 and \c format is \c
- * MESA_FORMAT_X8_Z24.
- *
- * @return true on success
- */
-
static bool
intel_renderbuffer_update_wrapper(struct intel_context *intel,
struct intel_renderbuffer *irb,
struct intel_mipmap_tree *mt = intel_image->mt;
int level = image->Level;
- rb->Format = image->TexFormat;
- rb->InternalFormat = image->InternalFormat;
- rb->_BaseFormat = image->_BaseFormat;
- rb->Width = mt->level[level].width;
- rb->Height = mt->level[level].height;
+ rb->Depth = image->Depth;
- rb->Delete = intel_delete_renderbuffer;
rb->AllocStorage = intel_nop_alloc_storage;
intel_miptree_check_level_layer(mt, level, layer);
irb->mt_level = level;
- irb->mt_layer = layer;
+
+ switch (mt->msaa_layout) {
+ case INTEL_MSAA_LAYOUT_UMS:
+ case INTEL_MSAA_LAYOUT_CMS:
+ irb->mt_layer = layer * mt->num_samples;
+ break;
+
+ default:
+ irb->mt_layer = layer;
+ }
intel_miptree_reference(&irb->mt, mt);
if (mt->hiz_mt == NULL &&
intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
- intel_miptree_alloc_hiz(intel, mt, 0 /* num_samples */);
+ intel_miptree_alloc_hiz(intel, mt);
if (!mt->hiz_mt)
return false;
}
/* compute offset of the particular 2D image within the texture region */
intel_miptree_get_image_offset(irb->mt,
irb->mt_level,
- 0, /* face, which we ignore */
irb->mt_layer,
&dst_x, &dst_y);
irb->draw_y = dst_y;
}
-/**
- * Rendering to tiled buffers requires that the base address of the
- * buffer be aligned to a page boundary. We generally render to
- * textures by pointing the surface at the mipmap image level, which
- * may not be aligned to a tile boundary.
- *
- * This function returns an appropriately-aligned base offset
- * according to the tiling restrictions, plus any required x/y offset
- * from there.
- */
-uint32_t
-intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb,
- uint32_t *tile_x,
- uint32_t *tile_y)
-{
- struct intel_region *region = irb->mt->region;
- uint32_t mask_x, mask_y;
-
- intel_region_get_tile_masks(region, &mask_x, &mask_y);
-
- *tile_x = irb->draw_x & mask_x;
- *tile_y = irb->draw_y & mask_y;
- return intel_region_get_aligned_offset(region, irb->draw_x & ~mask_x,
- irb->draw_y & ~mask_y);
-}
-
/**
* Called by glFramebufferTexture[123]DEXT() (and other places) to
* prepare for rendering into texture memory. This might be called
struct gl_renderbuffer_attachment *att)
{
struct intel_context *intel = intel_context(ctx);
- struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
- struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
+ struct gl_renderbuffer *rb = att->Renderbuffer;
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+ struct gl_texture_image *image = rb->TexImage;
struct intel_texture_image *intel_image = intel_texture_image(image);
struct intel_mipmap_tree *mt = intel_image->mt;
int layer;
/* Fallback on drawing to a texture that doesn't have a miptree
* (has a border, width/height 0, etc.)
*/
- _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
_swrast_render_texture(ctx, fb, att);
return;
}
- else if (!irb) {
- intel_miptree_check_level_layer(mt, att->TextureLevel, layer);
-
- irb = (struct intel_renderbuffer *)intel_new_renderbuffer(ctx, ~0);
- if (irb) {
- /* bind the wrapper to the attachment point */
- _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base.Base);
- }
- else {
- /* fallback to software rendering */
- _swrast_render_texture(ctx, fb, att);
- return;
- }
- }
+ intel_miptree_check_level_layer(mt, att->TextureLevel, layer);
if (!intel_renderbuffer_update_wrapper(intel, irb, image, layer)) {
- _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
_swrast_render_texture(ctx, fb, att);
return;
}
- irb->tex_image = image;
-
- DBG("Begin render %s texture tex=%u w=%d h=%d refcount=%d\n",
+ DBG("Begin render %s texture tex=%u w=%d h=%d d=%d refcount=%d\n",
_mesa_get_format_name(image->TexFormat),
- att->Texture->Name, image->Width, image->Height,
- irb->Base.Base.RefCount);
+ att->Texture->Name, image->Width, image->Height, image->Depth,
+ rb->RefCount);
/* update drawing region, etc */
intel_draw_buffer(ctx);
* Called by Mesa when rendering to a texture is done.
*/
static void
-intel_finish_render_texture(struct gl_context * ctx,
- struct gl_renderbuffer_attachment *att)
+intel_finish_render_texture(struct gl_context * ctx, struct gl_renderbuffer *rb)
{
struct intel_context *intel = intel_context(ctx);
- struct gl_texture_object *tex_obj = att->Texture;
- struct gl_texture_image *image =
- tex_obj->Image[att->CubeMapFace][att->TextureLevel];
- struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
-
- DBG("Finish render %s texture tex=%u\n",
- _mesa_get_format_name(image->TexFormat), att->Texture->Name);
- if (irb)
- irb->tex_image = NULL;
+ DBG("Finish render %s texture\n", _mesa_get_format_name(rb->Format));
/* Since we've (probably) rendered to the texture and will (likely) use
* it in the texture domain later on in this batchbuffer, flush the
intel_batchbuffer_emit_mi_flush(intel);
}
+/* Mark the framebuffer incomplete (GL_FRAMEBUFFER_UNSUPPORTED) and report
+ * the reason: through KHR_debug messaging when a debug context is active,
+ * and through DBG() unconditionally.
+ *
+ * NOTE: relies on a variable named 'ctx' being in scope at the call site,
+ * and may evaluate __VA_ARGS__ twice — pass only side-effect-free arguments.
+ */
+#define fbo_incomplete(fb, ...) do { \
+      static GLuint msg_id = 0; \
+      if (unlikely(ctx->Const.ContextFlags & GL_CONTEXT_FLAG_DEBUG_BIT)) { \
+         _mesa_gl_debug(ctx, &msg_id, \
+                        MESA_DEBUG_TYPE_OTHER, \
+                        MESA_DEBUG_SEVERITY_MEDIUM, \
+                        __VA_ARGS__); \
+      } \
+      DBG(__VA_ARGS__); \
+      (fb)->_Status = GL_FRAMEBUFFER_UNSUPPORTED; \
+   } while (0)
+
/**
* Do additional "completeness" testing of a framebuffer object.
*/
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
struct intel_context *intel = intel_context(ctx);
- const struct intel_renderbuffer *depthRb =
+ struct intel_renderbuffer *depthRb =
intel_get_renderbuffer(fb, BUFFER_DEPTH);
- const struct intel_renderbuffer *stencilRb =
+ struct intel_renderbuffer *stencilRb =
intel_get_renderbuffer(fb, BUFFER_STENCIL);
struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
int i;
*/
if (depthRb->mt_level != stencilRb->mt_level ||
depthRb->mt_layer != stencilRb->mt_layer) {
- DBG("depth image level/layer %d/%d != stencil image %d/%d\n",
- depthRb->mt_level,
- depthRb->mt_layer,
- stencilRb->mt_level,
- stencilRb->mt_layer);
- fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ fbo_incomplete(fb,
+ "FBO incomplete: depth image level/layer %d/%d != "
+ "stencil image %d/%d\n",
+ depthRb->mt_level,
+ depthRb->mt_layer,
+ stencilRb->mt_level,
+ stencilRb->mt_layer);
}
} else {
if (!intel->has_separate_stencil) {
- DBG("separate stencil unsupported\n");
- fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ fbo_incomplete(fb, "FBO incomplete: separate stencil "
+ "unsupported\n");
}
if (stencil_mt->format != MESA_FORMAT_S8) {
- DBG("separate stencil is %s instead of S8\n",
- _mesa_get_format_name(stencil_mt->format));
- fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ fbo_incomplete(fb, "FBO incomplete: separate stencil is %s "
+ "instead of S8\n",
+ _mesa_get_format_name(stencil_mt->format));
}
- if (intel->gen < 7 && depth_mt->hiz_mt == NULL) {
+ if (intel->gen < 7 && !intel_renderbuffer_has_hiz(depthRb)) {
/* Before Gen7, separate depth and stencil buffers can be used
* only if HiZ is enabled. From the Sandybridge PRM, Volume 2,
* Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable:
* [DevSNB]: This field must be set to the same value (enabled
* or disabled) as Hierarchical Depth Buffer Enable.
*/
- DBG("separate stencil without HiZ\n");
- fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED;
+ fbo_incomplete(fb, "FBO incomplete: separate stencil "
+ "without HiZ\n");
}
}
}
*/
rb = fb->Attachment[i].Renderbuffer;
if (rb == NULL) {
- DBG("attachment without renderbuffer\n");
- fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ fbo_incomplete(fb, "FBO incomplete: attachment without "
+ "renderbuffer\n");
continue;
}
if (fb->Attachment[i].Type == GL_TEXTURE) {
- const struct gl_texture_image *img =
- _mesa_get_attachment_teximage_const(&fb->Attachment[i]);
-
- if (img->Border) {
- DBG("texture with border\n");
- fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ if (rb->TexImage->Border) {
+ fbo_incomplete(fb, "FBO incomplete: texture with border\n");
continue;
}
}
irb = intel_renderbuffer(rb);
if (irb == NULL) {
- DBG("software rendering renderbuffer\n");
- fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ fbo_incomplete(fb, "FBO incomplete: software rendering "
+ "renderbuffer\n");
continue;
}
if (!intel->vtbl.render_target_supported(intel, rb)) {
- DBG("Unsupported HW texture/renderbuffer format attached: %s\n",
- _mesa_get_format_name(intel_rb_format(irb)));
- fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ fbo_incomplete(fb, "FBO incomplete: Unsupported HW "
+ "texture/renderbuffer format attached: %s\n",
+ _mesa_get_format_name(intel_rb_format(irb)));
}
}
}
GLbitfield mask, GLenum filter)
{
if (mask & GL_COLOR_BUFFER_BIT) {
+ GLint i;
const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
const struct gl_framebuffer *readFb = ctx->ReadBuffer;
- const struct gl_renderbuffer_attachment *drawAtt =
- &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]];
+ const struct gl_renderbuffer_attachment *drawAtt;
struct intel_renderbuffer *srcRb =
intel_renderbuffer(readFb->_ColorReadBuffer);
- /* If the source and destination are the same size with no
- mirroring, the rectangles are within the size of the
- texture and there is no scissor then we can use
- glCopyTexSubimage2D to implement the blit. This will end
- up as a fast hardware blit on some drivers */
- if (srcRb && drawAtt && drawAtt->Texture &&
- srcX0 - srcX1 == dstX0 - dstX1 &&
- srcY0 - srcY1 == dstY0 - dstY1 &&
- srcX1 >= srcX0 &&
- srcY1 >= srcY0 &&
- srcX0 >= 0 && srcX1 <= readFb->Width &&
- srcY0 >= 0 && srcY1 <= readFb->Height &&
- dstX0 >= 0 && dstX1 <= drawFb->Width &&
- dstY0 >= 0 && dstY1 <= drawFb->Height &&
- !ctx->Scissor.Enabled) {
- const struct gl_texture_object *texObj = drawAtt->Texture;
- const GLuint dstLevel = drawAtt->TextureLevel;
- const GLenum target = texObj->Target;
-
- struct gl_texture_image *texImage =
- _mesa_select_tex_image(ctx, texObj, target, dstLevel);
-
- if (intel_copy_texsubimage(intel_context(ctx),
- intel_texture_image(texImage),
- dstX0, dstY0,
- srcRb,
- srcX0, srcY0,
- srcX1 - srcX0, /* width */
- srcY1 - srcY0))
- mask &= ~GL_COLOR_BUFFER_BIT;
+ /* If the source and destination are the same size with no mirroring,
+ * the rectangles are within the size of the texture and there is no
+ * scissor then we can use glCopyTexSubimage2D to implement the blit.
+ * This will end up as a fast hardware blit on some drivers.
+ */
+ const GLboolean use_intel_copy_texsubimage =
+ srcX0 - srcX1 == dstX0 - dstX1 &&
+ srcY0 - srcY1 == dstY0 - dstY1 &&
+ srcX1 >= srcX0 &&
+ srcY1 >= srcY0 &&
+ srcX0 >= 0 && srcX1 <= readFb->Width &&
+ srcY0 >= 0 && srcY1 <= readFb->Height &&
+ dstX0 >= 0 && dstX1 <= drawFb->Width &&
+ dstY0 >= 0 && dstY1 <= drawFb->Height &&
+ !ctx->Scissor.Enabled;
+
+ /* Verify that all the draw buffers can be blitted using
+ * intel_copy_texsubimage().
+ */
+ for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
+ int idx = ctx->DrawBuffer->_ColorDrawBufferIndexes[i];
+ if (idx == -1)
+ continue;
+ drawAtt = &drawFb->Attachment[idx];
+
+ if (srcRb && drawAtt && drawAtt->Texture &&
+ use_intel_copy_texsubimage)
+ continue;
+ else
+ return mask;
+ }
+
+ /* Blit to all active draw buffers */
+ for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
+ int idx = ctx->DrawBuffer->_ColorDrawBufferIndexes[i];
+ if (idx == -1)
+ continue;
+ drawAtt = &drawFb->Attachment[idx];
+
+ {
+ const struct gl_texture_object *texObj = drawAtt->Texture;
+ const GLuint dstLevel = drawAtt->TextureLevel;
+ const GLenum target = texObj->Target;
+
+ struct gl_texture_image *texImage =
+ _mesa_select_tex_image(ctx, texObj, target, dstLevel);
+
+ if (!intel_copy_texsubimage(intel_context(ctx),
+ intel_texture_image(texImage),
+ dstX0, dstY0,
+ srcRb,
+ srcX0, srcY0,
+ srcX1 - srcX0, /* width */
+ srcY1 - srcY0))
+ return mask;
+ }
}
+
+ mask &= ~GL_COLOR_BUFFER_BIT;
}
return mask;
GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
GLbitfield mask, GLenum filter)
{
- /* Try faster, glCopyTexSubImage2D approach first which uses the BLT. */
- mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
- srcX0, srcY0, srcX1, srcY1,
- dstX0, dstY0, dstX1, dstY1,
- mask, filter);
- if (mask == 0x0)
- return;
-
#ifndef I915
mask = brw_blorp_framebuffer(intel_context(ctx),
srcX0, srcY0, srcX1, srcY1,
return;
#endif
+ /* Try glCopyTexSubImage2D approach which uses the BLT. */
+ mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
+ srcX0, srcY0, srcX1, srcY1,
+ dstX0, dstY0, dstX1, dstY1,
+ mask, filter);
+ if (mask == 0x0)
+ return;
+
+
_mesa_meta_BlitFramebuffer(ctx,
srcX0, srcY0, srcX1, srcY1,
dstX0, dstY0, dstX1, dstY1,
mask, filter);
}
+/**
+ * Flag the renderbuffer's miptree as needing a multisample-to-singlesample
+ * downsample before its contents are next consumed.
+ *
+ * This is a no-op except on multisample buffers shared with DRI2, i.e. when
+ * the miptree carries a separate singlesample_mt.
+ */
+void
+intel_renderbuffer_set_needs_downsample(struct intel_renderbuffer *irb)
+{
+   if (irb->mt && irb->mt->singlesample_mt)
+      irb->mt->need_downsample = true;
+}
+
+/**
+ * Does the renderbuffer have hiz enabled?
+ *
+ * Delegates to the per-slice HiZ check on the attached miptree, using the
+ * level/layer this renderbuffer wraps. NOTE(review): irb->mt is dereferenced
+ * by the callee with no NULL guard here — confirm callers only ask after
+ * storage has been allocated (cf. the irb->mt checks elsewhere in this file).
+ */
+bool
+intel_renderbuffer_has_hiz(struct intel_renderbuffer *irb)
+{
+   return intel_miptree_slice_has_hiz(irb->mt, irb->mt_level, irb->mt_layer);
+}
+
void
intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer *irb)
{
return false;
}
+/**
+ * Allocate a fresh miptree for the teximage this renderbuffer wraps and move
+ * the image into it. When @invalidate is set the old contents are discarded
+ * instead of copied. Afterwards the renderbuffer is repointed at the
+ * texture's (new) miptree and its draw offset recomputed.
+ */
+void
+intel_renderbuffer_move_to_temp(struct intel_context *intel,
+                                struct intel_renderbuffer *irb,
+                                bool invalidate)
+{
+   struct gl_renderbuffer *rb = &irb->Base.Base;
+   struct intel_texture_image *intel_image = intel_texture_image(rb->TexImage);
+   struct intel_mipmap_tree *new_mt;
+   int width, height, depth;
+
+   intel_miptree_get_dimensions_for_image(rb->TexImage, &width, &height, &depth);
+
+   /* Single-level tree: first_level == last_level == the wrapped image's
+    * level. Keep the current sample count.
+    */
+   new_mt = intel_miptree_create(intel, rb->TexImage->TexObject->Target,
+                                 intel_image->base.Base.TexFormat,
+                                 intel_image->base.Base.Level,
+                                 intel_image->base.Base.Level,
+                                 width, height, depth,
+                                 true,
+                                 irb->mt->num_samples,
+                                 INTEL_MIPTREE_TILING_ANY);
+
+   /* Allocation can fail (cf. the !irb->mt checks elsewhere in this file);
+    * bail out and leave the renderbuffer untouched rather than dereferencing
+    * NULL below.
+    */
+   if (!new_mt)
+      return;
+
+   if (intel->vtbl.is_hiz_depth_format(intel, new_mt->format)) {
+      intel_miptree_alloc_hiz(intel, new_mt);
+   }
+
+   /* NOTE(review): the copy presumably repoints intel_image->mt at new_mt,
+    * since that is what we reference afterwards — confirm against
+    * intel_miptree_copy_teximage().
+    */
+   intel_miptree_copy_teximage(intel, intel_image, new_mt, invalidate);
+
+   intel_miptree_reference(&irb->mt, intel_image->mt);
+   intel_renderbuffer_set_draw_offset(irb);
+   intel_miptree_release(&new_mt);
+}
+
/**
* Do one-time context initializations related to GL_EXT_framebuffer_object.
* Hook in device driver functions.
intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer;
-
-#if FEATURE_OES_EGL_image
intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
intel_image_target_renderbuffer_storage;
-#endif
}