#include "main/enums.h"
#include "main/imports.h"
#include "main/macros.h"
-#include "main/mfeatures.h"
#include "main/mtypes.h"
#include "main/fbobject.h"
#include "main/framebuffer.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
#include "intel_tex.h"
-#include "intel_span.h"
#ifndef I915
#include "brw_context.h"
#endif
#define FILE_DEBUG_FLAG DEBUG_FBO
-
-bool
-intel_framebuffer_has_hiz(struct gl_framebuffer *fb)
-{
- struct intel_renderbuffer *rb = NULL;
- if (fb)
- rb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
- return rb && rb->mt && rb->mt->hiz_mt;
-}
+static struct gl_renderbuffer *
+intel_new_renderbuffer(struct gl_context * ctx, GLuint name);
/**
 * Return the intel_region backing a framebuffer attachment, or NULL if the
 * attachment has no renderbuffer or no miptree.
 *
 * For the stencil attachment, prefer the separate stencil miptree's region
 * when one exists (separate-stencil hardware keeps stencil in its own
 * W-tiled buffer).
 */
struct intel_region*
intel_get_rb_region(struct gl_framebuffer *fb, GLuint attIndex)
{
   struct intel_renderbuffer *irb = intel_get_renderbuffer(fb, attIndex);

   if (!irb || !irb->mt)
      return NULL;

   if (attIndex == BUFFER_STENCIL && irb->mt->stencil_mt)
      return irb->mt->stencil_mt->region;

   return irb->mt->region;
}
/** Called by gl_renderbuffer::Delete() */
static void
-intel_delete_renderbuffer(struct gl_renderbuffer *rb)
+intel_delete_renderbuffer(struct gl_context *ctx, struct gl_renderbuffer *rb)
{
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
intel_miptree_release(&irb->mt);
- _mesa_reference_renderbuffer(&irb->wrapped_depth, NULL);
- _mesa_reference_renderbuffer(&irb->wrapped_stencil, NULL);
-
- free(irb);
-}
-
-/**
- * \brief Map a renderbuffer through the GTT.
- *
- * \see intel_map_renderbuffer()
- */
-static void
-intel_map_renderbuffer_gtt(struct gl_context *ctx,
- struct gl_renderbuffer *rb,
- GLuint x, GLuint y, GLuint w, GLuint h,
- GLbitfield mode,
- GLubyte **out_map,
- GLint *out_stride)
-{
- struct intel_context *intel = intel_context(ctx);
- struct intel_renderbuffer *irb = intel_renderbuffer(rb);
- GLubyte *map;
- int stride, flip_stride;
-
- assert(irb->mt);
-
- intel_renderbuffer_resolve_depth(intel, irb);
- if (mode & GL_MAP_WRITE_BIT) {
- intel_renderbuffer_set_needs_hiz_resolve(irb);
- }
-
- irb->map_mode = mode;
- irb->map_x = x;
- irb->map_y = y;
- irb->map_w = w;
- irb->map_h = h;
-
- stride = irb->mt->region->pitch * irb->mt->region->cpp;
-
- if (rb->Name == 0) {
- y = irb->mt->region->height - 1 - y;
- flip_stride = -stride;
- } else {
- x += irb->draw_x;
- y += irb->draw_y;
- flip_stride = stride;
- }
-
- if (drm_intel_bo_references(intel->batch.bo, irb->mt->region->bo)) {
- intel_batchbuffer_flush(intel);
- }
-
- drm_intel_gem_bo_map_gtt(irb->mt->region->bo);
-
- map = irb->mt->region->bo->virtual;
- map += x * irb->mt->region->cpp;
- map += (int)y * stride;
-
- *out_map = map;
- *out_stride = flip_stride;
-
- DBG("%s: rb %d (%s) gtt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
- __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
- x, y, w, h, *out_map, *out_stride);
-}
-
-/**
- * \brief Map a renderbuffer by blitting it to a temporary gem buffer.
- *
- * On gen6+, we have LLC sharing, which means we can get high-performance
- * access to linear-mapped buffers.
- *
- * This function allocates a temporary gem buffer at
- * intel_renderbuffer::map_bo, then blits the renderbuffer into it, and
- * returns a map of that. (Note: Only X tiled buffers can be blitted).
- *
- * \see intel_renderbuffer::map_bo
- * \see intel_map_renderbuffer()
- */
-static void
-intel_map_renderbuffer_blit(struct gl_context *ctx,
- struct gl_renderbuffer *rb,
- GLuint x, GLuint y, GLuint w, GLuint h,
- GLbitfield mode,
- GLubyte **out_map,
- GLint *out_stride)
-{
- struct intel_context *intel = intel_context(ctx);
- struct intel_renderbuffer *irb = intel_renderbuffer(rb);
-
- int src_x, src_y;
- int dst_stride;
-
- assert(irb->mt->region);
- assert(intel->gen >= 6);
- assert(!(mode & GL_MAP_WRITE_BIT));
- assert(irb->mt->region->tiling == I915_TILING_X);
-
- irb->map_mode = mode;
- irb->map_x = x;
- irb->map_y = y;
- irb->map_w = w;
- irb->map_h = h;
-
- dst_stride = ALIGN(w * irb->mt->region->cpp, 4);
-
- if (rb->Name) {
- src_x = x + irb->draw_x;
- src_y = y + irb->draw_y;
- } else {
- src_x = x;
- src_y = irb->mt->region->height - y - h;
- }
-
- irb->map_bo = drm_intel_bo_alloc(intel->bufmgr, "MapRenderbuffer() temp",
- dst_stride * h, 4096);
-
- /* We don't do the flip in the blit, because it's always so tricky to get
- * right.
- */
- if (irb->map_bo &&
- intelEmitCopyBlit(intel,
- irb->mt->region->cpp,
- irb->mt->region->pitch, irb->mt->region->bo,
- 0, irb->mt->region->tiling,
- dst_stride / irb->mt->region->cpp, irb->map_bo,
- 0, I915_TILING_NONE,
- src_x, src_y,
- 0, 0,
- w, h,
- GL_COPY)) {
- intel_batchbuffer_flush(intel);
- drm_intel_bo_map(irb->map_bo, false);
-
- if (rb->Name) {
- *out_map = irb->map_bo->virtual;
- *out_stride = dst_stride;
- } else {
- *out_map = irb->map_bo->virtual + (h - 1) * dst_stride;
- *out_stride = -dst_stride;
- }
-
- DBG("%s: rb %d (%s) blit mapped: (%d, %d) (%dx%d) -> %p/%d\n",
- __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
- src_x, src_y, w, h, *out_map, *out_stride);
- } else {
- /* Fallback to GTT mapping. */
- drm_intel_bo_unreference(irb->map_bo);
- irb->map_bo = NULL;
- intel_map_renderbuffer_gtt(ctx, rb,
- x, y, w, h,
- mode,
- out_map, out_stride);
- }
+ _mesa_delete_renderbuffer(ctx, rb);
}
/**
GLint *out_stride)
{
struct intel_context *intel = intel_context(ctx);
+ struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
-
- /* We sometimes get called with this by our intel_span.c usage. */
- if (!irb->mt && !irb->wrapped_depth) {
- *out_map = NULL;
- *out_stride = 0;
+ void *map;
+ int stride;
+
+ if (srb->Buffer) {
+ /* this is a malloc'd renderbuffer (accum buffer), not an irb */
+ GLint bpp = _mesa_get_format_bytes(rb->Format);
+ GLint rowStride = srb->RowStride;
+ *out_map = (GLubyte *) srb->Buffer + y * rowStride + x * bpp;
+ *out_stride = rowStride;
return;
}
- if (rb->Format == MESA_FORMAT_S8 || irb->wrapped_depth) {
- void *map;
- int stride;
+ intel_prepare_render(intel);
- /* For a window-system renderbuffer, we need to flip the mapping we
- * receive upside-down. So we need to ask for a rectangle on flipped
- * vertically, and we then return a pointer to the bottom of it with a
- * negative stride.
- */
- if (rb->Name == 0) {
- y = rb->Height - y - h;
- }
-
- intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer,
- x, y, w, h, mode, &map, &stride);
+ /* For a window-system renderbuffer, we need to flip the mapping we receive
+ * upside-down. So we need to ask for a rectangle on flipped vertically, and
+ * we then return a pointer to the bottom of it with a negative stride.
+ */
+ if (rb->Name == 0) {
+ y = rb->Height - y - h;
+ }
- if (rb->Name == 0) {
- map += (h - 1) * stride;
- stride = -stride;
- }
+ intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer,
+ x, y, w, h, mode, &map, &stride);
- DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
- __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
- x, y, w, h, *out_map, *out_stride);
-
- *out_map = map;
- *out_stride = stride;
- } else if (intel->gen >= 6 &&
- !(mode & GL_MAP_WRITE_BIT) &&
- irb->mt->region->tiling == I915_TILING_X) {
- intel_map_renderbuffer_blit(ctx, rb, x, y, w, h, mode,
- out_map, out_stride);
- } else {
- intel_map_renderbuffer_gtt(ctx, rb, x, y, w, h, mode,
- out_map, out_stride);
+ if (rb->Name == 0) {
+ map += (h - 1) * stride;
+ stride = -stride;
}
+
+ DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
+ __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
+ x, y, w, h, map, stride);
+
+ *out_map = map;
+ *out_stride = stride;
}
/**
struct gl_renderbuffer *rb)
{
struct intel_context *intel = intel_context(ctx);
+ struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
DBG("%s: rb %d (%s)\n", __FUNCTION__,
rb->Name, _mesa_get_format_name(rb->Format));
- if (rb->Format == MESA_FORMAT_S8 || irb->wrapped_depth) {
- intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer);
- } else if (irb->map_bo) {
- /* Paired with intel_map_renderbuffer_blit(). */
- drm_intel_bo_unmap(irb->map_bo);
- drm_intel_bo_unreference(irb->map_bo);
- irb->map_bo = 0;
- } else {
- /* Paired with intel_map_renderbuffer_gtt(). */
- if (irb->mt) {
- /* The miptree may be null when intel_map_renderbuffer() is
- * called from intel_span.c.
- */
- drm_intel_gem_bo_unmap_gtt(irb->mt->region->bo);
- }
+ if (srb->Buffer) {
+ /* this is a malloc'd renderbuffer (accum buffer) */
+ /* nothing to do */
+ return;
}
+
+ intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer);
}
+
/**
- * Return a pointer to a specific pixel in a renderbuffer.
+ * Round up the requested multisample count to the next supported sample size.
*/
-static void *
-intel_get_pointer(struct gl_context * ctx, struct gl_renderbuffer *rb,
- GLint x, GLint y)
-{
- /* By returning NULL we force all software rendering to go through
- * the span routines.
- */
- return NULL;
+unsigned
+intel_quantize_num_samples(struct intel_screen *intel, unsigned num_samples)
+{
+ switch (intel->gen) {
+ case 6:
+ /* Gen6 supports only 4x multisampling. */
+ if (num_samples > 0)
+ return 4;
+ else
+ return 0;
+ case 7:
+ /* Gen7 supports 4x and 8x multisampling. */
+ if (num_samples > 4)
+ return 8;
+ else if (num_samples > 0)
+ return 4;
+ else
+ return 0;
+ return 0;
+ default:
+ /* MSAA unsupported. */
+ return 0;
+ }
}
* Called via glRenderbufferStorageEXT() to set the format and allocate
* storage for a user-created renderbuffer.
*/
-GLboolean
+static GLboolean
intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
GLenum internalFormat,
GLuint width, GLuint height)
{
struct intel_context *intel = intel_context(ctx);
+ struct intel_screen *screen = intel->intelScreen;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
- int cpp, tiling;
-
- ASSERT(rb->Name != 0);
+ rb->NumSamples = intel_quantize_num_samples(screen, rb->NumSamples);
switch (internalFormat) {
default:
* except they're less useful because you can't texture with
* them.
*/
- rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, internalFormat,
+ rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, GL_TEXTURE_2D,
+ internalFormat,
GL_NONE, GL_NONE);
break;
case GL_STENCIL_INDEX:
rb->Width = width;
rb->Height = height;
rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
- rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format);
- cpp = _mesa_get_format_bytes(rb->Format);
-
- intel_flush(ctx);
intel_miptree_release(&irb->mt);
_mesa_lookup_enum_by_nr(internalFormat),
_mesa_get_format_name(rb->Format), width, height);
- tiling = I915_TILING_NONE;
- if (intel->use_texture_tiling) {
- GLenum base_format = _mesa_get_format_base_format(rb->Format);
-
- if (intel->gen >= 4 && (base_format == GL_DEPTH_COMPONENT ||
- base_format == GL_STENCIL_INDEX ||
- base_format == GL_DEPTH_STENCIL))
- tiling = I915_TILING_Y;
- else
- tiling = I915_TILING_X;
- }
-
- if (irb->Base.Format == MESA_FORMAT_S8) {
- /*
- * The stencil buffer is W tiled. However, we request from the kernel a
- * non-tiled buffer because the GTT is incapable of W fencing.
- *
- * The stencil buffer has quirky pitch requirements. From Vol 2a,
- * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
- * The pitch must be set to 2x the value computed based on width, as
- * the stencil buffer is stored with two rows interleaved.
- * To accomplish this, we resort to the nasty hack of doubling the drm
- * region's cpp and halving its height.
- *
- * If we neglect to double the pitch, then render corruption occurs.
- */
- irb->mt = intel_miptree_create_for_renderbuffer(
- intel,
- rb->Format,
- I915_TILING_NONE,
- cpp * 2,
- ALIGN(width, 64),
- ALIGN((height + 1) / 2, 64));
- if (!irb->mt)
- return false;
-
- } else if (irb->Base.Format == MESA_FORMAT_S8_Z24
- && intel->has_separate_stencil) {
-
- bool ok = true;
- struct gl_renderbuffer *depth_rb;
- struct gl_renderbuffer *stencil_rb;
- struct intel_renderbuffer *depth_irb, *stencil_irb;
-
- depth_rb = intel_create_wrapped_renderbuffer(ctx, width, height,
- MESA_FORMAT_X8_Z24);
- stencil_rb = intel_create_wrapped_renderbuffer(ctx, width, height,
- MESA_FORMAT_S8);
- ok = depth_rb && stencil_rb;
- ok = ok && intel_alloc_renderbuffer_storage(ctx, depth_rb,
- depth_rb->InternalFormat,
- width, height);
- ok = ok && intel_alloc_renderbuffer_storage(ctx, stencil_rb,
- stencil_rb->InternalFormat,
- width, height);
-
- if (!ok) {
- if (depth_rb) {
- intel_delete_renderbuffer(depth_rb);
- }
- if (stencil_rb) {
- intel_delete_renderbuffer(stencil_rb);
- }
- return false;
- }
-
- depth_irb = intel_renderbuffer(depth_rb);
- stencil_irb = intel_renderbuffer(stencil_rb);
-
- intel_miptree_reference(&depth_irb->mt->stencil_mt, stencil_irb->mt);
- intel_miptree_reference(&irb->mt, depth_irb->mt);
-
- depth_rb->Wrapped = rb;
- stencil_rb->Wrapped = rb;
- _mesa_reference_renderbuffer(&irb->wrapped_depth, depth_rb);
- _mesa_reference_renderbuffer(&irb->wrapped_stencil, stencil_rb);
-
- } else {
- irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
- tiling, cpp,
- width, height);
- if (!irb->mt)
- return false;
+ if (width == 0 || height == 0)
+ return true;
- if (intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
- bool ok = intel_miptree_alloc_hiz(intel, irb->mt);
- if (!ok) {
- intel_miptree_release(&irb->mt);
- return false;
- }
- }
- }
+ irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
+ width, height,
+ rb->NumSamples);
+ if (!irb->mt)
+ return false;
return true;
}
-#if FEATURE_OES_EGL_image
static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
struct gl_renderbuffer *rb,
irb = intel_renderbuffer(rb);
intel_miptree_release(&irb->mt);
- irb->mt = intel_miptree_create_for_region(intel,
- GL_TEXTURE_2D,
- image->format,
- image->region);
+ irb->mt = intel_miptree_create_for_bo(intel,
+ image->region->bo,
+ image->format,
+ image->offset,
+ image->region->width,
+ image->region->height,
+ image->region->pitch,
+ image->region->tiling);
if (!irb->mt)
return;
rb->Width = image->region->width;
rb->Height = image->region->height;
rb->Format = image->format;
- rb->DataType = image->data_type;
rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
image->internal_format);
+ rb->NeedsFinishRenderTexture = true;
}
-#endif
/**
* Called for each hardware renderbuffer when a _window_ is resized.
fb->Initialized = true; /* XXX remove someday */
- if (fb->Name != 0) {
+ if (_mesa_is_user_fbo(fb)) {
return;
}
/**
* Create a new intel_renderbuffer which corresponds to an on-screen window,
* not a user-created renderbuffer.
+ *
+ * \param num_samples must be quantized.
*/
struct intel_renderbuffer *
-intel_create_renderbuffer(gl_format format)
+intel_create_renderbuffer(gl_format format, unsigned num_samples)
{
- GET_CURRENT_CONTEXT(ctx);
-
struct intel_renderbuffer *irb;
+ struct gl_renderbuffer *rb;
+
+ GET_CURRENT_CONTEXT(ctx);
irb = CALLOC_STRUCT(intel_renderbuffer);
if (!irb) {
return NULL;
}
- _mesa_init_renderbuffer(&irb->Base, 0);
- irb->Base.ClassID = INTEL_RB_CLASS;
- irb->Base._BaseFormat = _mesa_get_format_base_format(format);
- irb->Base.Format = format;
- irb->Base.InternalFormat = irb->Base._BaseFormat;
- irb->Base.DataType = intel_mesa_format_to_rb_datatype(format);
+ rb = &irb->Base.Base;
+
+ _mesa_init_renderbuffer(rb, 0);
+ rb->ClassID = INTEL_RB_CLASS;
+ rb->_BaseFormat = _mesa_get_format_base_format(format);
+ rb->Format = format;
+ rb->InternalFormat = rb->_BaseFormat;
+ rb->NumSamples = num_samples;
/* intel-specific methods */
- irb->Base.Delete = intel_delete_renderbuffer;
- irb->Base.AllocStorage = intel_alloc_window_storage;
- irb->Base.GetPointer = intel_get_pointer;
+ rb->Delete = intel_delete_renderbuffer;
+ rb->AllocStorage = intel_alloc_window_storage;
return irb;
}
-
-struct gl_renderbuffer*
-intel_create_wrapped_renderbuffer(struct gl_context * ctx,
- int width, int height,
- gl_format format)
+/**
+ * Private window-system buffers (as opposed to ones shared with the display
+ * server created with intel_create_renderbuffer()) are most similar in their
+ * handling to user-created renderbuffers, but they have a resize handler that
+ * may be called at intel_update_renderbuffers() time.
+ *
+ * \param num_samples must be quantized.
+ */
+struct intel_renderbuffer *
+intel_create_private_renderbuffer(gl_format format, unsigned num_samples)
{
- /*
- * The name here is irrelevant, as long as its nonzero, because the
- * renderbuffer never gets entered into Mesa's renderbuffer hash table.
- */
- GLuint name = ~0;
-
- struct intel_renderbuffer *irb = CALLOC_STRUCT(intel_renderbuffer);
- if (!irb) {
- _mesa_error(ctx, GL_OUT_OF_MEMORY, "creating renderbuffer");
- return NULL;
- }
+ struct intel_renderbuffer *irb;
- struct gl_renderbuffer *rb = &irb->Base;
- _mesa_init_renderbuffer(rb, name);
- rb->ClassID = INTEL_RB_CLASS;
- rb->_BaseFormat = _mesa_get_format_base_format(format);
- rb->Format = format;
- rb->InternalFormat = rb->_BaseFormat;
- rb->DataType = intel_mesa_format_to_rb_datatype(format);
- rb->Width = width;
- rb->Height = height;
+ irb = intel_create_renderbuffer(format, num_samples);
+ irb->Base.Base.AllocStorage = intel_alloc_renderbuffer_storage;
- return rb;
+ return irb;
}
-
/**
* Create a new renderbuffer object.
* Typically called via glBindRenderbufferEXT().
{
/*struct intel_context *intel = intel_context(ctx); */
struct intel_renderbuffer *irb;
+ struct gl_renderbuffer *rb;
irb = CALLOC_STRUCT(intel_renderbuffer);
if (!irb) {
return NULL;
}
- _mesa_init_renderbuffer(&irb->Base, name);
- irb->Base.ClassID = INTEL_RB_CLASS;
+ rb = &irb->Base.Base;
+
+ _mesa_init_renderbuffer(rb, name);
+ rb->ClassID = INTEL_RB_CLASS;
/* intel-specific methods */
- irb->Base.Delete = intel_delete_renderbuffer;
- irb->Base.AllocStorage = intel_alloc_renderbuffer_storage;
- irb->Base.GetPointer = intel_get_pointer;
+ rb->Delete = intel_delete_renderbuffer;
+ rb->AllocStorage = intel_alloc_renderbuffer_storage;
/* span routines set in alloc_storage function */
- return &irb->Base;
+ return rb;
}
{
DBG("Intel FramebufferRenderbuffer %u %u\n", fb->Name, rb ? rb->Name : 0);
- intel_flush(ctx);
-
_mesa_framebuffer_renderbuffer(ctx, fb, attachment, rb);
intel_draw_buffer(ctx);
}
-static struct intel_renderbuffer*
-intel_renderbuffer_wrap_miptree(struct intel_context *intel,
- struct intel_mipmap_tree *mt,
- uint32_t level,
- uint32_t layer,
- gl_format format,
- GLenum internal_format);
-
-/**
- * \par Special case for separate stencil
- *
- * When wrapping a depthstencil texture that uses separate stencil, this
- * function is recursively called twice: once to create \c
- * irb->wrapped_depth and again to create \c irb->wrapped_stencil. On the
- * call to create \c irb->wrapped_depth, the \c format and \c
- * internal_format parameters do not match \c mt->format. In that case, \c
- * mt->format is MESA_FORMAT_S8_Z24 and \c format is \c
- * MESA_FORMAT_X8_Z24.
- *
- * @return true on success
- */
static bool
intel_renderbuffer_update_wrapper(struct intel_context *intel,
struct intel_renderbuffer *irb,
- struct intel_mipmap_tree *mt,
- uint32_t level,
- uint32_t layer,
- gl_format format,
- GLenum internal_format)
+ struct gl_texture_image *image,
+ uint32_t layer)
{
- struct gl_renderbuffer *rb = &irb->Base;
+ struct gl_renderbuffer *rb = &irb->Base.Base;
+ struct intel_texture_image *intel_image = intel_texture_image(image);
+ struct intel_mipmap_tree *mt = intel_image->mt;
+ int level = image->Level;
- rb->Format = format;
- rb->InternalFormat = internal_format;
- rb->DataType = intel_mesa_format_to_rb_datatype(rb->Format);
- rb->_BaseFormat = _mesa_get_format_base_format(rb->Format);
- rb->Width = mt->level[level].width;
- rb->Height = mt->level[level].height;
+ rb->Depth = image->Depth;
- irb->Base.Delete = intel_delete_renderbuffer;
- irb->Base.AllocStorage = intel_nop_alloc_storage;
+ rb->AllocStorage = intel_nop_alloc_storage;
intel_miptree_check_level_layer(mt, level, layer);
irb->mt_level = level;
- irb->mt_layer = layer;
-
- if (mt->stencil_mt && _mesa_is_depthstencil_format(rb->InternalFormat)) {
- assert((irb->wrapped_depth == NULL) == (irb->wrapped_stencil == NULL));
-
- struct intel_renderbuffer *depth_irb;
- struct intel_renderbuffer *stencil_irb;
-
- if (!irb->wrapped_depth) {
- depth_irb = intel_renderbuffer_wrap_miptree(intel,
- mt, level, layer,
- MESA_FORMAT_X8_Z24,
- GL_DEPTH_COMPONENT24);
- stencil_irb = intel_renderbuffer_wrap_miptree(intel,
- mt->stencil_mt,
- level, layer,
- MESA_FORMAT_S8,
- GL_STENCIL_INDEX8);
- _mesa_reference_renderbuffer(&irb->wrapped_depth, &depth_irb->Base);
- _mesa_reference_renderbuffer(&irb->wrapped_stencil, &stencil_irb->Base);
-
- if (!irb->wrapped_depth || !irb->wrapped_stencil)
- return false;
- } else {
- bool ok = true;
-
- depth_irb = intel_renderbuffer(irb->wrapped_depth);
- stencil_irb = intel_renderbuffer(irb->wrapped_stencil);
-
- ok &= intel_renderbuffer_update_wrapper(intel,
- depth_irb,
- mt,
- level, layer,
- MESA_FORMAT_X8_Z24,
- GL_DEPTH_COMPONENT24);
- ok &= intel_renderbuffer_update_wrapper(intel,
- stencil_irb,
- mt->stencil_mt,
- level, layer,
- MESA_FORMAT_S8,
- GL_STENCIL_INDEX8);
- if (!ok)
- return false;
- }
- intel_miptree_reference(&depth_irb->mt->stencil_mt, stencil_irb->mt);
- intel_miptree_reference(&irb->mt, depth_irb->mt);
- } else {
- intel_miptree_reference(&irb->mt, mt);
- intel_renderbuffer_set_draw_offset(irb);
-
- if (mt->hiz_mt == NULL &&
- intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
- intel_miptree_alloc_hiz(intel, mt);
- if (!mt->hiz_mt)
- return false;
- }
- }
-
- return true;
-}
-
-/**
- * \brief Wrap a renderbuffer around a single slice of a miptree.
- *
- * Called by glFramebufferTexture*(). This just allocates a
- * ``struct intel_renderbuffer`` then calls
- * intel_renderbuffer_update_wrapper() to do the real work.
- *
- * \see intel_renderbuffer_update_wrapper()
- */
-static struct intel_renderbuffer*
-intel_renderbuffer_wrap_miptree(struct intel_context *intel,
- struct intel_mipmap_tree *mt,
- uint32_t level,
- uint32_t layer,
- gl_format format,
- GLenum internal_format)
-
-{
- const GLuint name = ~0; /* not significant, but distinct for debugging */
- struct gl_context *ctx = &intel->ctx;
- struct intel_renderbuffer *irb;
+ switch (mt->msaa_layout) {
+ case INTEL_MSAA_LAYOUT_UMS:
+ case INTEL_MSAA_LAYOUT_CMS:
+ irb->mt_layer = layer * mt->num_samples;
+ break;
- intel_miptree_check_level_layer(mt, level, layer);
-
- irb = CALLOC_STRUCT(intel_renderbuffer);
- if (!irb) {
- _mesa_error(ctx, GL_OUT_OF_MEMORY, "glFramebufferTexture");
- return NULL;
+ default:
+ irb->mt_layer = layer;
}
- _mesa_init_renderbuffer(&irb->Base, name);
- irb->Base.ClassID = INTEL_RB_CLASS;
+ intel_miptree_reference(&irb->mt, mt);
- if (!intel_renderbuffer_update_wrapper(intel, irb,
- mt, level, layer,
- format, internal_format)) {
- free(irb);
- return NULL;
+ intel_renderbuffer_set_draw_offset(irb);
+
+ if (mt->hiz_mt == NULL &&
+ intel->vtbl.is_hiz_depth_format(intel, rb->Format)) {
+ intel_miptree_alloc_hiz(intel, mt);
+ if (!mt->hiz_mt)
+ return false;
}
- return irb;
+ return true;
}
void
/* compute offset of the particular 2D image within the texture region */
intel_miptree_get_image_offset(irb->mt,
irb->mt_level,
- 0, /* face, which we ignore */
irb->mt_layer,
&dst_x, &dst_y);
irb->draw_y = dst_y;
}
-/**
- * Rendering to tiled buffers requires that the base address of the
- * buffer be aligned to a page boundary. We generally render to
- * textures by pointing the surface at the mipmap image level, which
- * may not be aligned to a tile boundary.
- *
- * This function returns an appropriately-aligned base offset
- * according to the tiling restrictions, plus any required x/y offset
- * from there.
- */
-uint32_t
-intel_renderbuffer_tile_offsets(struct intel_renderbuffer *irb,
- uint32_t *tile_x,
- uint32_t *tile_y)
-{
- struct intel_region *region = irb->mt->region;
- int cpp = region->cpp;
- uint32_t pitch = region->pitch * cpp;
-
- if (region->tiling == I915_TILING_NONE) {
- *tile_x = 0;
- *tile_y = 0;
- return irb->draw_x * cpp + irb->draw_y * pitch;
- } else if (region->tiling == I915_TILING_X) {
- *tile_x = irb->draw_x % (512 / cpp);
- *tile_y = irb->draw_y % 8;
- return ((irb->draw_y / 8) * (8 * pitch) +
- (irb->draw_x - *tile_x) / (512 / cpp) * 4096);
- } else {
- assert(region->tiling == I915_TILING_Y);
- *tile_x = irb->draw_x % (128 / cpp);
- *tile_y = irb->draw_y % 32;
- return ((irb->draw_y / 32) * (32 * pitch) +
- (irb->draw_x - *tile_x) / (128 / cpp) * 4096);
- }
-}
-
-#ifndef I915
-static bool
-need_tile_offset_workaround(struct brw_context *brw,
- struct intel_renderbuffer *irb)
-{
- uint32_t tile_x, tile_y;
-
- if (brw->has_surface_tile_offset)
- return false;
-
- intel_renderbuffer_tile_offsets(irb, &tile_x, &tile_y);
-
- return tile_x != 0 || tile_y != 0;
-}
-#endif
-
/**
* Called by glFramebufferTexture[123]DEXT() (and other places) to
* prepare for rendering into texture memory. This might be called
struct gl_renderbuffer_attachment *att)
{
struct intel_context *intel = intel_context(ctx);
- struct gl_texture_image *image = _mesa_get_attachment_teximage(att);
- struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
+ struct gl_renderbuffer *rb = att->Renderbuffer;
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+ struct gl_texture_image *image = rb->TexImage;
struct intel_texture_image *intel_image = intel_texture_image(image);
struct intel_mipmap_tree *mt = intel_image->mt;
+ int layer;
(void) fb;
- int layer;
if (att->CubeMapFace > 0) {
assert(att->Zoffset == 0);
layer = att->CubeMapFace;
/* Fallback on drawing to a texture that doesn't have a miptree
* (has a border, width/height 0, etc.)
*/
- _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
_swrast_render_texture(ctx, fb, att);
return;
}
- else if (!irb) {
- irb = intel_renderbuffer_wrap_miptree(intel,
- mt,
- att->TextureLevel,
- layer,
- image->TexFormat,
- image->InternalFormat);
-
- if (irb) {
- /* bind the wrapper to the attachment point */
- _mesa_reference_renderbuffer(&att->Renderbuffer, &irb->Base);
- }
- else {
- /* fallback to software rendering */
- _swrast_render_texture(ctx, fb, att);
- return;
- }
- }
- if (!intel_renderbuffer_update_wrapper(intel, irb,
- mt, att->TextureLevel, layer,
- image->TexFormat,
- image->InternalFormat)) {
- _mesa_reference_renderbuffer(&att->Renderbuffer, NULL);
+ intel_miptree_check_level_layer(mt, att->TextureLevel, layer);
+
+ if (!intel_renderbuffer_update_wrapper(intel, irb, image, layer)) {
_swrast_render_texture(ctx, fb, att);
return;
}
- DBG("Begin render %s texture tex=%u w=%d h=%d refcount=%d\n",
+ DBG("Begin render %s texture tex=%u w=%d h=%d d=%d refcount=%d\n",
_mesa_get_format_name(image->TexFormat),
- att->Texture->Name, image->Width, image->Height,
- irb->Base.RefCount);
-
- intel_image->used_as_render_target = true;
-
-#ifndef I915
- if (need_tile_offset_workaround(brw_context(ctx), irb)) {
- /* Original gen4 hardware couldn't draw to a non-tile-aligned
- * destination in a miptree unless you actually setup your
- * renderbuffer as a miptree and used the fragile
- * lod/array_index/etc. controls to select the image. So,
- * instead, we just make a new single-level miptree and render
- * into that.
- */
- struct intel_context *intel = intel_context(ctx);
- struct intel_mipmap_tree *new_mt;
- int width, height, depth;
+ att->Texture->Name, image->Width, image->Height, image->Depth,
+ rb->RefCount);
- intel_miptree_get_dimensions_for_image(image, &width, &height, &depth);
-
- new_mt = intel_miptree_create(intel, image->TexObject->Target,
- intel_image->base.Base.TexFormat,
- intel_image->base.Base.Level,
- intel_image->base.Base.Level,
- width, height, depth,
- true);
-
- intel_miptree_copy_teximage(intel, intel_image, new_mt);
- intel_renderbuffer_set_draw_offset(irb);
-
- intel_miptree_reference(&irb->mt, intel_image->mt);
- intel_miptree_release(&new_mt);
- }
-#endif
/* update drawing region, etc */
intel_draw_buffer(ctx);
}
* Called by Mesa when rendering to a texture is done.
*/
static void
-intel_finish_render_texture(struct gl_context * ctx,
- struct gl_renderbuffer_attachment *att)
+intel_finish_render_texture(struct gl_context * ctx, struct gl_renderbuffer *rb)
{
struct intel_context *intel = intel_context(ctx);
- struct gl_texture_object *tex_obj = att->Texture;
- struct gl_texture_image *image =
- tex_obj->Image[att->CubeMapFace][att->TextureLevel];
- struct intel_texture_image *intel_image = intel_texture_image(image);
- DBG("Finish render %s texture tex=%u\n",
- _mesa_get_format_name(image->TexFormat), att->Texture->Name);
-
- /* Flag that this image may now be validated into the object's miptree. */
- if (intel_image)
- intel_image->used_as_render_target = false;
+ DBG("Finish render %s texture\n", _mesa_get_format_name(rb->Format));
/* Since we've (probably) rendered to the texture and will (likely) use
* it in the texture domain later on in this batchbuffer, flush the
intel_batchbuffer_emit_mi_flush(intel);
}
+#define fbo_incomplete(fb, ...) do { \
+ static GLuint msg_id = 0; \
+ if (unlikely(ctx->Const.ContextFlags & GL_CONTEXT_FLAG_DEBUG_BIT)) { \
+ _mesa_gl_debug(ctx, &msg_id, \
+ MESA_DEBUG_TYPE_OTHER, \
+ MESA_DEBUG_SEVERITY_MEDIUM, \
+ __VA_ARGS__); \
+ } \
+ DBG(__VA_ARGS__); \
+ fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED; \
+ } while (0)
+
/**
* Do additional "completeness" testing of a framebuffer object.
*/
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
struct intel_context *intel = intel_context(ctx);
- const struct intel_renderbuffer *depthRb =
+ struct intel_renderbuffer *depthRb =
intel_get_renderbuffer(fb, BUFFER_DEPTH);
- const struct intel_renderbuffer *stencilRb =
+ struct intel_renderbuffer *stencilRb =
intel_get_renderbuffer(fb, BUFFER_STENCIL);
+ struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
int i;
- /*
- * The depth and stencil renderbuffers are the same renderbuffer or wrap
- * the same texture.
- */
- if (depthRb && stencilRb) {
- bool depth_stencil_are_same;
- if (depthRb == stencilRb)
- depth_stencil_are_same = true;
- else if ((fb->Attachment[BUFFER_DEPTH].Type == GL_TEXTURE) &&
- (fb->Attachment[BUFFER_STENCIL].Type == GL_TEXTURE) &&
- (fb->Attachment[BUFFER_DEPTH].Texture->Name ==
- fb->Attachment[BUFFER_STENCIL].Texture->Name))
- depth_stencil_are_same = true;
- else
- depth_stencil_are_same = false;
+ DBG("%s() on fb %p (%s)\n", __FUNCTION__,
+ fb, (fb == ctx->DrawBuffer ? "drawbuffer" :
+ (fb == ctx->ReadBuffer ? "readbuffer" : "other buffer")));
+
+ if (depthRb)
+ depth_mt = depthRb->mt;
+ if (stencilRb) {
+ stencil_mt = stencilRb->mt;
+ if (stencil_mt->stencil_mt)
+ stencil_mt = stencil_mt->stencil_mt;
+ }
- if (!intel->has_separate_stencil && !depth_stencil_are_same) {
- fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ if (depth_mt && stencil_mt) {
+ if (depth_mt == stencil_mt) {
+ /* For true packed depth/stencil (not faked on prefers-separate-stencil
+ * hardware) we need to be sure they're the same level/layer, since
+ * we'll be emitting a single packet describing the packed setup.
+ */
+ if (depthRb->mt_level != stencilRb->mt_level ||
+ depthRb->mt_layer != stencilRb->mt_layer) {
+ fbo_incomplete(fb,
+ "FBO incomplete: depth image level/layer %d/%d != "
+ "stencil image %d/%d\n",
+ depthRb->mt_level,
+ depthRb->mt_layer,
+ stencilRb->mt_level,
+ stencilRb->mt_layer);
+ }
+ } else {
+ if (!intel->has_separate_stencil) {
+ fbo_incomplete(fb, "FBO incomplete: separate stencil "
+ "unsupported\n");
+ }
+ if (stencil_mt->format != MESA_FORMAT_S8) {
+ fbo_incomplete(fb, "FBO incomplete: separate stencil is %s "
+ "instead of S8\n",
+ _mesa_get_format_name(stencil_mt->format));
+ }
+ if (intel->gen < 7 && !intel_renderbuffer_has_hiz(depthRb)) {
+ /* Before Gen7, separate depth and stencil buffers can be used
+ * only if HiZ is enabled. From the Sandybridge PRM, Volume 2,
+ * Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable:
+ * [DevSNB]: This field must be set to the same value (enabled
+ * or disabled) as Hierarchical Depth Buffer Enable.
+ */
+ fbo_incomplete(fb, "FBO incomplete: separate stencil "
+ "without HiZ\n");
+ }
}
}
*/
rb = fb->Attachment[i].Renderbuffer;
if (rb == NULL) {
- DBG("attachment without renderbuffer\n");
- fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ fbo_incomplete(fb, "FBO incomplete: attachment without "
+ "renderbuffer\n");
continue;
}
+ if (fb->Attachment[i].Type == GL_TEXTURE) {
+ if (rb->TexImage->Border) {
+ fbo_incomplete(fb, "FBO incomplete: texture with border\n");
+ continue;
+ }
+ }
+
irb = intel_renderbuffer(rb);
if (irb == NULL) {
- DBG("software rendering renderbuffer\n");
- fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ fbo_incomplete(fb, "FBO incomplete: software rendering "
+ "renderbuffer\n");
continue;
}
- if (!intel->vtbl.render_target_supported(intel, irb->Base.Format)) {
- DBG("Unsupported HW texture/renderbuffer format attached: %s\n",
- _mesa_get_format_name(irb->Base.Format));
- fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
+ if (!intel->vtbl.render_target_supported(intel, rb)) {
+ fbo_incomplete(fb, "FBO incomplete: Unsupported HW "
+ "texture/renderbuffer format attached: %s\n",
+ _mesa_get_format_name(intel_rb_format(irb)));
}
-
-#ifdef I915
- if (!intel_span_supports_format(irb->Base.Format)) {
- DBG("Unsupported swrast texture/renderbuffer format attached: %s\n",
- _mesa_get_format_name(irb->Base.Format));
- fb->_Status = GL_FRAMEBUFFER_UNSUPPORTED_EXT;
- }
-#endif
}
}
GLbitfield mask, GLenum filter)
{
if (mask & GL_COLOR_BUFFER_BIT) {
+ GLint i;
const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
const struct gl_framebuffer *readFb = ctx->ReadBuffer;
- const struct gl_renderbuffer_attachment *drawAtt =
- &drawFb->Attachment[drawFb->_ColorDrawBufferIndexes[0]];
-
- /* If the source and destination are the same size with no
- mirroring, the rectangles are within the size of the
- texture and there is no scissor then we can use
- glCopyTexSubimage2D to implement the blit. This will end
- up as a fast hardware blit on some drivers */
- if (drawAtt && drawAtt->Texture &&
- srcX0 - srcX1 == dstX0 - dstX1 &&
- srcY0 - srcY1 == dstY0 - dstY1 &&
- srcX1 >= srcX0 &&
- srcY1 >= srcY0 &&
- srcX0 >= 0 && srcX1 <= readFb->Width &&
- srcY0 >= 0 && srcY1 <= readFb->Height &&
- dstX0 >= 0 && dstX1 <= drawFb->Width &&
- dstY0 >= 0 && dstY1 <= drawFb->Height &&
- !ctx->Scissor.Enabled) {
- const struct gl_texture_object *texObj = drawAtt->Texture;
- const GLuint dstLevel = drawAtt->TextureLevel;
- const GLenum target = texObj->Target;
-
- struct gl_texture_image *texImage =
- _mesa_select_tex_image(ctx, texObj, target, dstLevel);
-
- if (intel_copy_texsubimage(intel_context(ctx),
- intel_texture_image(texImage),
- dstX0, dstY0,
- srcX0, srcY0,
- srcX1 - srcX0, /* width */
- srcY1 - srcY0))
- mask &= ~GL_COLOR_BUFFER_BIT;
+ const struct gl_renderbuffer_attachment *drawAtt;
+ struct intel_renderbuffer *srcRb =
+ intel_renderbuffer(readFb->_ColorReadBuffer);
+
+ /* If the source and destination are the same size with no mirroring,
+ * the rectangles are within the size of the texture and there is no
+ * scissor then we can use glCopyTexSubimage2D to implement the blit.
+ * This will end up as a fast hardware blit on some drivers.
+ */
+ const GLboolean use_intel_copy_texsubimage =
+ srcX0 - srcX1 == dstX0 - dstX1 &&
+ srcY0 - srcY1 == dstY0 - dstY1 &&
+ srcX1 >= srcX0 &&
+ srcY1 >= srcY0 &&
+ srcX0 >= 0 && srcX1 <= readFb->Width &&
+ srcY0 >= 0 && srcY1 <= readFb->Height &&
+ dstX0 >= 0 && dstX1 <= drawFb->Width &&
+ dstY0 >= 0 && dstY1 <= drawFb->Height &&
+ !ctx->Scissor.Enabled;
+
+ /* Verify that all the draw buffers can be blitted using
+ * intel_copy_texsubimage().
+ */
+ for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
+ int idx = ctx->DrawBuffer->_ColorDrawBufferIndexes[i];
+ if (idx == -1)
+ continue;
+ drawAtt = &drawFb->Attachment[idx];
+
+ if (srcRb && drawAtt && drawAtt->Texture &&
+ use_intel_copy_texsubimage)
+ continue;
+ else
+ return mask;
+ }
+
+ /* Blit to all active draw buffers */
+ for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
+ int idx = ctx->DrawBuffer->_ColorDrawBufferIndexes[i];
+ if (idx == -1)
+ continue;
+ drawAtt = &drawFb->Attachment[idx];
+
+ {
+ const struct gl_texture_object *texObj = drawAtt->Texture;
+ const GLuint dstLevel = drawAtt->TextureLevel;
+ const GLenum target = texObj->Target;
+
+ struct gl_texture_image *texImage =
+ _mesa_select_tex_image(ctx, texObj, target, dstLevel);
+
+ if (!intel_copy_texsubimage(intel_context(ctx),
+ intel_texture_image(texImage),
+ dstX0, dstY0,
+ srcRb,
+ srcX0, srcY0,
+ srcX1 - srcX0, /* width */
+ srcY1 - srcY0))
+ return mask;
+ }
}
+
+ mask &= ~GL_COLOR_BUFFER_BIT;
}
return mask;
GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
GLbitfield mask, GLenum filter)
{
- /* Try faster, glCopyTexSubImage2D approach first which uses the BLT. */
+#ifndef I915
+ mask = brw_blorp_framebuffer(intel_context(ctx),
+ srcX0, srcY0, srcX1, srcY1,
+ dstX0, dstY0, dstX1, dstY1,
+ mask, filter);
+ if (mask == 0x0)
+ return;
+#endif
+
+ /* Try glCopyTexSubImage2D approach which uses the BLT. */
mask = intel_blit_framebuffer_copy_tex_sub_image(ctx,
srcX0, srcY0, srcX1, srcY1,
dstX0, dstY0, dstX1, dstY1,
if (mask == 0x0)
return;
+
_mesa_meta_BlitFramebuffer(ctx,
srcX0, srcY0, srcX1, srcY1,
dstX0, dstY0, dstX1, dstY1,
mask, filter);
}
+/**
+ * This is a no-op except on multisample buffers shared with DRI2.
+ */
+void
+intel_renderbuffer_set_needs_downsample(struct intel_renderbuffer *irb)
+{
+ if (irb->mt && irb->mt->singlesample_mt)
+ irb->mt->need_downsample = true;
+}
+
+/**
+ * Does the renderbuffer have hiz enabled?
+ */
+bool
+intel_renderbuffer_has_hiz(struct intel_renderbuffer *irb)
+{
+ return intel_miptree_slice_has_hiz(irb->mt, irb->mt_level, irb->mt_layer);
+}
+
void
intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer *irb)
{
intel_miptree_slice_set_needs_hiz_resolve(irb->mt,
irb->mt_level,
irb->mt_layer);
- } else if (irb->wrapped_depth) {
- intel_renderbuffer_set_needs_hiz_resolve(
- intel_renderbuffer(irb->wrapped_depth));
- } else {
- return;
}
}
intel_miptree_slice_set_needs_depth_resolve(irb->mt,
irb->mt_level,
irb->mt_layer);
- } else if (irb->wrapped_depth) {
- intel_renderbuffer_set_needs_depth_resolve(
- intel_renderbuffer(irb->wrapped_depth));
- } else {
- return;
}
}
irb->mt,
irb->mt_level,
irb->mt_layer);
- if (irb->wrapped_depth)
- return intel_renderbuffer_resolve_hiz(intel,
- intel_renderbuffer(irb->wrapped_depth));
return false;
}
irb->mt_level,
irb->mt_layer);
- if (irb->wrapped_depth)
- return intel_renderbuffer_resolve_depth(intel,
- intel_renderbuffer(irb->wrapped_depth));
-
return false;
}
+void
+intel_renderbuffer_move_to_temp(struct intel_context *intel,
+ struct intel_renderbuffer *irb,
+ bool invalidate)
+{
+ struct gl_renderbuffer *rb = &irb->Base.Base;
+ struct intel_texture_image *intel_image = intel_texture_image(rb->TexImage);
+ struct intel_mipmap_tree *new_mt;
+ int width, height, depth;
+
+ intel_miptree_get_dimensions_for_image(rb->TexImage, &width, &height, &depth);
+
+ new_mt = intel_miptree_create(intel, rb->TexImage->TexObject->Target,
+ intel_image->base.Base.TexFormat,
+ intel_image->base.Base.Level,
+ intel_image->base.Base.Level,
+ width, height, depth,
+ true,
+ irb->mt->num_samples,
+ INTEL_MIPTREE_TILING_ANY);
+
+ if (intel->vtbl.is_hiz_depth_format(intel, new_mt->format)) {
+ intel_miptree_alloc_hiz(intel, new_mt);
+ }
+
+ intel_miptree_copy_teximage(intel, intel_image, new_mt, invalidate);
+
+ intel_miptree_reference(&irb->mt, intel_image->mt);
+ intel_renderbuffer_set_draw_offset(irb);
+ intel_miptree_release(&new_mt);
+}
+
/**
* Do one-time context initializations related to GL_EXT_framebuffer_object.
* Hook in device driver functions.
intel->ctx.Driver.ResizeBuffers = intel_resize_buffers;
intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer;
-
-#if FEATURE_OES_EGL_image
intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
intel_image_target_renderbuffer_storage;
-#endif
}