/**************************************************************************
- *
- * Copyright 2006 Tungsten Graphics, Inc., Cedar Park, Texas.
+ *
+ * Copyright 2006 VMware, Inc.
* All Rights Reserved.
- *
+ *
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
- *
+ *
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
- *
+ *
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
- * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
+ *
**************************************************************************/
#include "main/context.h"
#include "main/teximage.h"
#include "main/image.h"
+#include "main/hash_table.h"
+#include "main/set.h"
+#include "main/condrender.h"
#include "swrast/swrast.h"
#include "drivers/common/meta.h"
-#include "intel_context.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"
+#include "intel_screen.h"
#include "intel_tex.h"
#include "brw_context.h"
ASSERT(irb);
intel_miptree_release(&irb->mt);
+ intel_miptree_release(&irb->singlesample_mt);
_mesa_delete_renderbuffer(ctx, rb);
}
+/**
+ * \brief Downsample a winsys renderbuffer from mt to singlesample_mt.
+ *
+ * If the miptree needs no downsample, then skip.
+ */
+void
+intel_renderbuffer_downsample(struct brw_context *brw,
+ struct intel_renderbuffer *irb)
+{
+ if (!irb->need_downsample)
+ return;
+ intel_miptree_updownsample(brw, irb->mt, irb->singlesample_mt);
+ irb->need_downsample = false;
+}
+
+/**
+ * \brief Upsample a winsys renderbuffer from singlesample_mt to mt.
+ *
+ * The upsample is done unconditionally.
+ */
+void
+intel_renderbuffer_upsample(struct brw_context *brw,
+ struct intel_renderbuffer *irb)
+{
+ assert(!irb->need_downsample);
+
+ intel_miptree_updownsample(brw, irb->singlesample_mt, irb->mt);
+}
+
/**
* \see dd_function_table::MapRenderbuffer
*/
GLubyte **out_map,
GLint *out_stride)
{
- struct intel_context *intel = intel_context(ctx);
+ struct brw_context *brw = brw_context(ctx);
struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+ struct intel_mipmap_tree *mt;
void *map;
int stride;
return;
}
- intel_prepare_render(intel);
+ intel_prepare_render(brw);
+
+ /* The MapRenderbuffer API should always return a single-sampled mapping.
+    * The only case in which we are asked to map a multisampled RB is
+    * glReadPixels() (or a swrast path such as glCopyTexImage()) from a
+    * window-system MSAA buffer, where GL expects an automatic resolve to
+    * happen.
+ *
+ * If it's a color miptree, there is a ->singlesample_mt which wraps the
+ * actual window system renderbuffer (which we may resolve to at any time),
+ * while the miptree itself is our driver-private allocation. If it's a
+ * depth or stencil miptree, we have a private MSAA buffer and no shared
+ * singlesample buffer, and since we don't expect anybody to ever actually
+ * resolve it, we just make a temporary singlesample buffer now when we
+ * have to.
+ */
+ if (rb->NumSamples > 1) {
+ if (!irb->singlesample_mt) {
+ irb->singlesample_mt =
+ intel_miptree_create_for_renderbuffer(brw, irb->mt->format,
+ rb->Width, rb->Height,
+ 0 /*num_samples*/);
+ if (!irb->singlesample_mt)
+ goto fail;
+ irb->singlesample_mt_is_tmp = true;
+ irb->need_downsample = true;
+ }
+
+ intel_renderbuffer_downsample(brw, irb);
+ mt = irb->singlesample_mt;
+
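+      /* If the caller maps for writing, remember to push the modified
+       * single-sample data back out to the real MSAA buffer when the map
+       * is released (see intel_unmap_renderbuffer() below).
+       */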
+ irb->need_map_upsample = mode & GL_MAP_WRITE_BIT;
+ } else {
+ mt = irb->mt;
+ }
/* For a window-system renderbuffer, we need to flip the mapping we receive
* upside-down. So we need to ask for a rectangle flipped vertically, and
y = rb->Height - y - h;
}
- intel_miptree_map(intel, irb->mt, irb->mt_level, irb->mt_layer,
+ intel_miptree_map(brw, mt, irb->mt_level, irb->mt_layer,
x, y, w, h, mode, &map, &stride);
if (rb->Name == 0) {
*out_map = map;
*out_stride = stride;
+ return;
+
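+/* On failure, hand back a NULL map and zero stride. */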
+fail:
+ *out_map = NULL;
+ *out_stride = 0;
}
/**
intel_unmap_renderbuffer(struct gl_context *ctx,
struct gl_renderbuffer *rb)
{
- struct intel_context *intel = intel_context(ctx);
+ struct brw_context *brw = brw_context(ctx);
struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+ struct intel_mipmap_tree *mt;
DBG("%s: rb %d (%s)\n", __FUNCTION__,
rb->Name, _mesa_get_format_name(rb->Format));
return;
}
- intel_miptree_unmap(intel, irb->mt, irb->mt_level, irb->mt_layer);
+ if (rb->NumSamples > 1) {
+ mt = irb->singlesample_mt;
+ } else {
+ mt = irb->mt;
+ }
+
+ intel_miptree_unmap(brw, mt, irb->mt_level, irb->mt_layer);
+
+ if (irb->need_map_upsample) {
+ intel_renderbuffer_upsample(brw, irb);
+ irb->need_map_upsample = false;
+ }
+
+ if (irb->singlesample_mt_is_tmp)
+ intel_miptree_release(&irb->singlesample_mt);
}
unsigned
intel_quantize_num_samples(struct intel_screen *intel, unsigned num_samples)
{
- switch (intel->gen) {
- case 6:
- /* Gen6 supports only 4x multisampling. */
- if (num_samples > 0)
- return 4;
- else
- return 0;
- case 7:
- /* Gen7 supports 4x and 8x multisampling. */
- if (num_samples > 4)
- return 8;
- else if (num_samples > 0)
- return 4;
+ const int *msaa_modes = intel_supported_msaa_modes(intel);
+ int quantized_samples = 0;
+
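+   /* msaa_modes[] is assumed to be in decreasing order and terminated by
+    * -1: this walk selects the smallest supported sample count that still
+    * satisfies the request, or 0 (no MSAA) if num_samples exceeds every
+    * supported mode.
+    */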
+ for (int i = 0; msaa_modes[i] != -1; ++i) {
+ if (msaa_modes[i] >= num_samples)
+ quantized_samples = msaa_modes[i];
else
- return 0;
- return 0;
- default:
- /* MSAA unsupported. */
- return 0;
+ break;
}
-}
+ return quantized_samples;
+}
-/**
- * Called via glRenderbufferStorageEXT() to set the format and allocate
- * storage for a user-created renderbuffer.
- */
-static GLboolean
-intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
- GLenum internalFormat,
- GLuint width, GLuint height)
+static mesa_format
+intel_renderbuffer_format(struct gl_context * ctx, GLenum internalFormat)
{
- struct intel_context *intel = intel_context(ctx);
- struct intel_screen *screen = intel->intelScreen;
- struct intel_renderbuffer *irb = intel_renderbuffer(rb);
- rb->NumSamples = intel_quantize_num_samples(screen, rb->NumSamples);
+ struct brw_context *brw = brw_context(ctx);
switch (internalFormat) {
default:
/* Use the same format-choice logic as for textures.
* Renderbuffers aren't any different from textures for us,
* except they're less useful because you can't texture with
* them.
*/
- rb->Format = intel->ctx.Driver.ChooseTextureFormat(ctx, GL_TEXTURE_2D,
- internalFormat,
- GL_NONE, GL_NONE);
+ return ctx->Driver.ChooseTextureFormat(ctx, GL_TEXTURE_2D,
+ internalFormat,
+ GL_NONE, GL_NONE);
break;
case GL_STENCIL_INDEX:
case GL_STENCIL_INDEX1_EXT:
case GL_STENCIL_INDEX8_EXT:
case GL_STENCIL_INDEX16_EXT:
/* These aren't actual texture formats, so force them here. */
- if (intel->has_separate_stencil) {
- rb->Format = MESA_FORMAT_S8;
+ if (brw->has_separate_stencil) {
+ return MESA_FORMAT_S_UINT8;
} else {
- assert(!intel->must_use_separate_stencil);
- rb->Format = MESA_FORMAT_S8_Z24;
+ assert(!brw->must_use_separate_stencil);
+ return MESA_FORMAT_Z24_UNORM_S8_UINT;
}
- break;
}
+}
+
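+/**
+ * Storage allocation shared by user renderbuffers and driver-private
+ * (window-system) renderbuffers; assumes rb->Format was already chosen
+ * by the caller.
+ */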
+static GLboolean
+intel_alloc_private_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
+ GLenum internalFormat,
+ GLuint width, GLuint height)
+{
+ struct brw_context *brw = brw_context(ctx);
+ struct intel_screen *screen = brw->intelScreen;
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+ assert(rb->Format != MESA_FORMAT_NONE);
+
+ rb->NumSamples = intel_quantize_num_samples(screen, rb->NumSamples);
rb->Width = width;
rb->Height = height;
rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
if (width == 0 || height == 0)
return true;
- irb->mt = intel_miptree_create_for_renderbuffer(intel, rb->Format,
+ irb->mt = intel_miptree_create_for_renderbuffer(brw, rb->Format,
width, height,
rb->NumSamples);
if (!irb->mt)
return false;
+ irb->layer_count = 1;
+
return true;
}
+/**
+ * Called via glRenderbufferStorageEXT() to set the format and allocate
+ * storage for a user-created renderbuffer.
+ */
+static GLboolean
+intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
+ GLenum internalFormat,
+ GLuint width, GLuint height)
+{
+ rb->Format = intel_renderbuffer_format(ctx, internalFormat);
+   return intel_alloc_private_renderbuffer_storage(ctx, rb, internalFormat,
+                                                   width, height);
+}
static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
struct gl_renderbuffer *rb,
void *image_handle)
{
- struct intel_context *intel = intel_context(ctx);
+ struct brw_context *brw = brw_context(ctx);
struct intel_renderbuffer *irb;
__DRIscreen *screen;
__DRIimage *image;
- screen = intel->intelScreen->driScrnPriv;
+ screen = brw->intelScreen->driScrnPriv;
image = screen->dri2.image->lookupEGLImage(screen, image_handle,
screen->loaderPrivate);
if (image == NULL)
return;
+ if (image->planar_format && image->planar_format->nplanes > 1) {
+ _mesa_error(ctx, GL_INVALID_OPERATION,
+ "glEGLImageTargetRenderbufferStorage(planar buffers are not "
+ "supported as render targets.");
+ return;
+ }
+
+   /* Buffers imported from outside the driver (dma-bufs) are read-only. */
+ if (image->dma_buf_imported) {
+ _mesa_error(ctx, GL_INVALID_OPERATION,
+ "glEGLImageTargetRenderbufferStorage(dma buffers are read-only)");
+ return;
+ }
+
/* __DRIimage is opaque to the core so it has to be checked here */
switch (image->format) {
- case MESA_FORMAT_RGBA8888_REV:
- _mesa_error(&intel->ctx, GL_INVALID_OPERATION,
+ case MESA_FORMAT_R8G8B8A8_UNORM:
+ _mesa_error(ctx, GL_INVALID_OPERATION,
"glEGLImageTargetRenderbufferStorage(unsupported image format");
return;
break;
irb = intel_renderbuffer(rb);
intel_miptree_release(&irb->mt);
- irb->mt = intel_miptree_create_for_bo(intel,
- image->region->bo,
+ irb->mt = intel_miptree_create_for_bo(brw,
+ image->bo,
image->format,
image->offset,
- image->region->width,
- image->region->height,
- image->region->pitch,
- image->region->tiling);
+ image->width,
+ image->height,
+ image->pitch);
if (!irb->mt)
return;
rb->InternalFormat = image->internal_format;
- rb->Width = image->region->width;
- rb->Height = image->region->height;
+ rb->Width = image->width;
+ rb->Height = image->height;
rb->Format = image->format;
- rb->_BaseFormat = _mesa_base_fbo_format(&intel->ctx,
- image->internal_format);
+ rb->_BaseFormat = _mesa_base_fbo_format(ctx, image->internal_format);
rb->NeedsFinishRenderTexture = true;
+ irb->layer_count = 1;
}
/**
* \param num_samples must be quantized.
*/
struct intel_renderbuffer *
-intel_create_renderbuffer(gl_format format, unsigned num_samples)
+intel_create_renderbuffer(mesa_format format, unsigned num_samples)
{
struct intel_renderbuffer *irb;
struct gl_renderbuffer *rb;
}
rb = &irb->Base.Base;
+ irb->layer_count = 1;
_mesa_init_renderbuffer(rb, 0);
rb->ClassID = INTEL_RB_CLASS;
* \param num_samples must be quantized.
*/
struct intel_renderbuffer *
-intel_create_private_renderbuffer(gl_format format, unsigned num_samples)
+intel_create_private_renderbuffer(mesa_format format, unsigned num_samples)
{
struct intel_renderbuffer *irb;
irb = intel_create_renderbuffer(format, num_samples);
- irb->Base.Base.AllocStorage = intel_alloc_renderbuffer_storage;
+ irb->Base.Base.AllocStorage = intel_alloc_private_renderbuffer_storage;
return irb;
}
static struct gl_renderbuffer *
intel_new_renderbuffer(struct gl_context * ctx, GLuint name)
{
- /*struct intel_context *intel = intel_context(ctx); */
struct intel_renderbuffer *irb;
struct gl_renderbuffer *rb;
}
static bool
-intel_renderbuffer_update_wrapper(struct intel_context *intel,
+intel_renderbuffer_update_wrapper(struct brw_context *brw,
struct intel_renderbuffer *irb,
- struct gl_texture_image *image,
- uint32_t layer)
+ struct gl_texture_image *image,
+ uint32_t layer,
+ bool layered)
{
struct gl_renderbuffer *rb = &irb->Base.Base;
struct intel_texture_image *intel_image = intel_texture_image(image);
struct intel_mipmap_tree *mt = intel_image->mt;
int level = image->Level;
- rb->Depth = image->Depth;
-
rb->AllocStorage = intel_nop_alloc_storage;
+ /* adjust for texture view parameters */
+ layer += image->TexObject->MinLayer;
+ level += image->TexObject->MinLevel;
+
intel_miptree_check_level_layer(mt, level, layer);
irb->mt_level = level;
+ int layer_multiplier;
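+   /* In the UMS and CMS MSAA layouts, each sample of a logical layer
+    * occupies its own slice in the miptree, so layer indices must be
+    * scaled by the sample count.
+    */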
switch (mt->msaa_layout) {
case INTEL_MSAA_LAYOUT_UMS:
case INTEL_MSAA_LAYOUT_CMS:
- irb->mt_layer = layer * mt->num_samples;
+ layer_multiplier = mt->num_samples;
break;
default:
- irb->mt_layer = layer;
+ layer_multiplier = 1;
+ }
+
+ irb->mt_layer = layer_multiplier * layer;
+
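+   /* For layered rendering, expose every layer of the attached image:
+    * NumLayers is nonzero only for texture views; otherwise take the
+    * depth of this miptree level, undoing the per-sample scaling above.
+    */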
+ if (layered) {
+      irb->layer_count = image->TexObject->NumLayers ?:
+                         mt->level[level].depth / layer_multiplier;
+ } else {
+ irb->layer_count = 1;
}
intel_miptree_reference(&irb->mt, mt);
intel_renderbuffer_set_draw_offset(irb);
- if (mt->hiz_mt == NULL && brw_is_hiz_depth_format(intel, rb->Format)) {
- intel_miptree_alloc_hiz(intel, mt);
+ if (mt->hiz_mt == NULL && brw_is_hiz_depth_format(brw, rb->Format)) {
+ intel_miptree_alloc_hiz(brw, mt);
if (!mt->hiz_mt)
return false;
}
struct gl_framebuffer *fb,
struct gl_renderbuffer_attachment *att)
{
- struct intel_context *intel = intel_context(ctx);
+ struct brw_context *brw = brw_context(ctx);
struct gl_renderbuffer *rb = att->Renderbuffer;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
struct gl_texture_image *image = rb->TexImage;
intel_miptree_check_level_layer(mt, att->TextureLevel, layer);
- if (!intel_renderbuffer_update_wrapper(intel, irb, image, layer)) {
+ if (!intel_renderbuffer_update_wrapper(brw, irb, image, layer, att->Layered)) {
_swrast_render_texture(ctx, fb, att);
return;
}
}
-/**
- * Called by Mesa when rendering to a texture is done.
- */
-static void
-intel_finish_render_texture(struct gl_context * ctx, struct gl_renderbuffer *rb)
-{
- struct intel_context *intel = intel_context(ctx);
-
- DBG("Finish render %s texture\n", _mesa_get_format_name(rb->Format));
-
- /* Since we've (probably) rendered to the texture and will (likely) use
- * it in the texture domain later on in this batchbuffer, flush the
- * batch. Once again, we wish for a domain tracker in libdrm to cover
- * usage inside of a batchbuffer like GEM does in the kernel.
- */
- intel_batchbuffer_emit_mi_flush(intel);
-}
-
#define fbo_incomplete(fb, ...) do { \
static GLuint msg_id = 0; \
if (unlikely(ctx->Const.ContextFlags & GL_CONTEXT_FLAG_DEBUG_BIT)) { \
static void
intel_validate_framebuffer(struct gl_context *ctx, struct gl_framebuffer *fb)
{
- struct intel_context *intel = intel_context(ctx);
+ struct brw_context *brw = brw_context(ctx);
struct intel_renderbuffer *depthRb =
intel_get_renderbuffer(fb, BUFFER_DEPTH);
struct intel_renderbuffer *stencilRb =
}
if (depth_mt && stencil_mt) {
+ if (brw->gen >= 7) {
+      /* For gen >= 7, we are using the lod/minimum-array-element fields
+       * and supporting layered rendering. This means that we must restrict
+       * the depth & stencil attachments to match in various more restrictive
+       * ways (width, height, depth, LOD and layer).
+ */
+ if (depth_mt->physical_width0 != stencil_mt->physical_width0 ||
+ depth_mt->physical_height0 != stencil_mt->physical_height0 ||
+ depth_mt->physical_depth0 != stencil_mt->physical_depth0 ||
+ depthRb->mt_level != stencilRb->mt_level ||
+ depthRb->mt_layer != stencilRb->mt_layer) {
+ fbo_incomplete(fb,
+ "FBO incomplete: depth and stencil must match in"
+ "width, height, depth, LOD and layer\n");
+ }
+ }
if (depth_mt == stencil_mt) {
/* For true packed depth/stencil (not faked on prefers-separate-stencil
* hardware) we need to be sure they're the same level/layer, since
stencilRb->mt_layer);
}
} else {
- if (!intel->has_separate_stencil) {
+ if (!brw->has_separate_stencil) {
fbo_incomplete(fb, "FBO incomplete: separate stencil "
"unsupported\n");
}
- if (stencil_mt->format != MESA_FORMAT_S8) {
+ if (stencil_mt->format != MESA_FORMAT_S_UINT8) {
fbo_incomplete(fb, "FBO incomplete: separate stencil is %s "
"instead of S8\n",
_mesa_get_format_name(stencil_mt->format));
}
- if (intel->gen < 7 && !intel_renderbuffer_has_hiz(depthRb)) {
+ if (brw->gen < 7 && !intel_renderbuffer_has_hiz(depthRb)) {
/* Before Gen7, separate depth and stencil buffers can be used
* only if HiZ is enabled. From the Sandybridge PRM, Volume 2,
* Part 1, Bit 3DSTATE_DEPTH_BUFFER.SeparateStencilBufferEnable:
continue;
}
- if (!intel->vtbl.render_target_supported(intel, rb)) {
+ if (!brw_render_target_supported(brw, rb)) {
fbo_incomplete(fb, "FBO incomplete: Unsupported HW "
"texture/renderbuffer format attached: %s\n",
_mesa_get_format_name(intel_rb_format(irb)));
GLint dstX1, GLint dstY1,
GLbitfield mask, GLenum filter)
{
- struct intel_context *intel = intel_context(ctx);
+ struct brw_context *brw = brw_context(ctx);
+
+ /* Sync up the state of window system buffers. We need to do this before
+ * we go looking for the buffers.
+ */
+ intel_prepare_render(brw);
if (mask & GL_COLOR_BUFFER_BIT) {
GLint i;
srcY0 >= 0 && srcY1 <= readFb->Height &&
dstX0 >= 0 && dstX1 <= drawFb->Width &&
dstY0 >= 0 && dstY1 <= drawFb->Height &&
- !ctx->Scissor.Enabled)) {
+ !(ctx->Scissor.EnableFlags))) {
perf_debug("glBlitFramebuffer(): non-1:1 blit. "
"Falling back to software rendering.\n");
return mask;
return mask;
}
- gl_format src_format = _mesa_get_srgb_format_linear(src_rb->Format);
- gl_format dst_format = _mesa_get_srgb_format_linear(dst_rb->Format);
- if (src_format != dst_format) {
- perf_debug("glBlitFramebuffer(): unsupported blit from %s to %s. "
- "Falling back to software rendering.\n",
- _mesa_get_format_name(src_format),
- _mesa_get_format_name(dst_format));
- return mask;
- }
-
- if (!intel_miptree_blit(intel,
+ if (!intel_miptree_blit(brw,
src_irb->mt,
src_irb->mt_level, src_irb->mt_layer,
srcX0, srcY0, src_rb->Name == 0,
GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
GLbitfield mask, GLenum filter)
{
- mask = brw_blorp_framebuffer(intel_context(ctx),
+   /* Page 679 of the OpenGL 4.4 spec says:
+ * "Added BlitFramebuffer to commands affected by conditional rendering in
+ * section 10.10 (Bug 9562)."
+ */
+ if (!_mesa_check_conditional_render(ctx))
+ return;
+
+ mask = brw_blorp_framebuffer(brw_context(ctx),
srcX0, srcY0, srcX1, srcY1,
dstX0, dstY0, dstX1, dstY1,
mask, filter);
mask, filter);
}
-/**
- * This is a no-op except on multisample buffers shared with DRI2.
- */
-void
-intel_renderbuffer_set_needs_downsample(struct intel_renderbuffer *irb)
-{
- if (irb->mt && irb->mt->singlesample_mt)
- irb->mt->need_downsample = true;
-}
-
/**
* Does the renderbuffer have hiz enabled?
*/
return intel_miptree_slice_has_hiz(irb->mt, irb->mt_level, irb->mt_layer);
}
-void
-intel_renderbuffer_set_needs_hiz_resolve(struct intel_renderbuffer *irb)
-{
- if (irb->mt) {
- intel_miptree_slice_set_needs_hiz_resolve(irb->mt,
- irb->mt_level,
- irb->mt_layer);
- }
-}
-
-void
-intel_renderbuffer_set_needs_depth_resolve(struct intel_renderbuffer *irb)
-{
- if (irb->mt) {
- intel_miptree_slice_set_needs_depth_resolve(irb->mt,
- irb->mt_level,
- irb->mt_layer);
- }
-}
-
bool
-intel_renderbuffer_resolve_hiz(struct intel_context *intel,
+intel_renderbuffer_resolve_hiz(struct brw_context *brw,
struct intel_renderbuffer *irb)
{
if (irb->mt)
- return intel_miptree_slice_resolve_hiz(intel,
+ return intel_miptree_slice_resolve_hiz(brw,
irb->mt,
irb->mt_level,
irb->mt_layer);
return false;
}
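+/**
+ * Mark the depth attachment's slices as needing a depth resolve: every
+ * slice of the level for a layered attachment, otherwise just the single
+ * attached slice.
+ */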
+void
+intel_renderbuffer_att_set_needs_depth_resolve(struct gl_renderbuffer_attachment *att)
+{
+ struct intel_renderbuffer *irb = intel_renderbuffer(att->Renderbuffer);
+ if (irb->mt) {
+ if (att->Layered) {
+ intel_miptree_set_all_slices_need_depth_resolve(irb->mt, irb->mt_level);
+ } else {
+ intel_miptree_slice_set_needs_depth_resolve(irb->mt,
+ irb->mt_level,
+ irb->mt_layer);
+ }
+ }
+}
+
bool
-intel_renderbuffer_resolve_depth(struct intel_context *intel,
+intel_renderbuffer_resolve_depth(struct brw_context *brw,
struct intel_renderbuffer *irb)
{
if (irb->mt)
- return intel_miptree_slice_resolve_depth(intel,
+ return intel_miptree_slice_resolve_depth(brw,
irb->mt,
irb->mt_level,
irb->mt_layer);
}
void
-intel_renderbuffer_move_to_temp(struct intel_context *intel,
+intel_renderbuffer_move_to_temp(struct brw_context *brw,
struct intel_renderbuffer *irb,
bool invalidate)
{
intel_miptree_get_dimensions_for_image(rb->TexImage, &width, &height, &depth);
- new_mt = intel_miptree_create(intel, rb->TexImage->TexObject->Target,
+ new_mt = intel_miptree_create(brw, rb->TexImage->TexObject->Target,
intel_image->base.Base.TexFormat,
intel_image->base.Base.Level,
intel_image->base.Base.Level,
irb->mt->num_samples,
INTEL_MIPTREE_TILING_ANY);
- if (brw_is_hiz_depth_format(intel, new_mt->format)) {
- intel_miptree_alloc_hiz(intel, new_mt);
+ if (brw_is_hiz_depth_format(brw, new_mt->format)) {
+ intel_miptree_alloc_hiz(brw, new_mt);
}
- intel_miptree_copy_teximage(intel, intel_image, new_mt, invalidate);
+ intel_miptree_copy_teximage(brw, intel_image, new_mt, invalidate);
intel_miptree_reference(&irb->mt, intel_image->mt);
intel_renderbuffer_set_draw_offset(irb);
intel_miptree_release(&new_mt);
}
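+/**
+ * Forget which BOs the current batch has rendered to. This is meant for
+ * batch boundaries, where the kernel takes over the inter-batch flushing
+ * that brw_render_cache_set_check_flush() provides within a batch.
+ */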
+void
+brw_render_cache_set_clear(struct brw_context *brw)
+{
+ struct set_entry *entry;
+
+ set_foreach(brw->render_cache, entry) {
+ _mesa_set_remove(brw->render_cache, entry);
+ }
+}
+
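+/**
+ * Record that the current batch has rendered to the given BO, so that a
+ * later read of it within the same batch can be preceded by a flush.
+ */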
+void
+brw_render_cache_set_add_bo(struct brw_context *brw, drm_intel_bo *bo)
+{
+ _mesa_set_add(brw->render_cache, _mesa_hash_pointer(bo), bo);
+}
+
+/**
+ * Emits an appropriate flush for a BO if it has been rendered to within the
+ * same batchbuffer as a read that's about to be emitted.
+ *
+ * The GPU has separate, incoherent caches for the render cache and the
+ * sampler cache, along with other caches. Usually data in the different
+ * caches don't interact (e.g. we don't render to our driver-generated
+ * immediate constant data), but for render-to-texture in FBOs we definitely
+ * do. When a batchbuffer is flushed, the kernel will ensure that everything
+ * necessary is flushed before another use of that BO, but for reuse from
+ * different caches within a batchbuffer, it's all our responsibility.
+ */
+void
+brw_render_cache_set_check_flush(struct brw_context *brw, drm_intel_bo *bo)
+{
+ if (!_mesa_set_search(brw->render_cache, _mesa_hash_pointer(bo), bo))
+ return;
+
+ intel_batchbuffer_emit_mi_flush(brw);
+}
+
/**
* Do one-time context initializations related to GL_EXT_framebuffer_object.
* Hook in device driver functions.
*/
void
-intel_fbo_init(struct intel_context *intel)
+intel_fbo_init(struct brw_context *brw)
{
- intel->ctx.Driver.NewFramebuffer = intel_new_framebuffer;
- intel->ctx.Driver.NewRenderbuffer = intel_new_renderbuffer;
- intel->ctx.Driver.MapRenderbuffer = intel_map_renderbuffer;
- intel->ctx.Driver.UnmapRenderbuffer = intel_unmap_renderbuffer;
- intel->ctx.Driver.RenderTexture = intel_render_texture;
- intel->ctx.Driver.FinishRenderTexture = intel_finish_render_texture;
- intel->ctx.Driver.ValidateFramebuffer = intel_validate_framebuffer;
- intel->ctx.Driver.BlitFramebuffer = intel_blit_framebuffer;
- intel->ctx.Driver.EGLImageTargetRenderbufferStorage =
+ struct dd_function_table *dd = &brw->ctx.Driver;
+ dd->NewFramebuffer = intel_new_framebuffer;
+ dd->NewRenderbuffer = intel_new_renderbuffer;
+ dd->MapRenderbuffer = intel_map_renderbuffer;
+ dd->UnmapRenderbuffer = intel_unmap_renderbuffer;
+ dd->RenderTexture = intel_render_texture;
+ dd->ValidateFramebuffer = intel_validate_framebuffer;
+ dd->BlitFramebuffer = intel_blit_framebuffer;
+ dd->EGLImageTargetRenderbufferStorage =
intel_image_target_renderbuffer_storage;
+
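+   /* Pointer set of BOs rendered to in the current batch, consumed by the
+    * brw_render_cache_set_*() functions above.
+    */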
+ brw->render_cache = _mesa_set_create(brw, _mesa_key_pointer_equal);
}