-/**************************************************************************
- *
+/*
* Copyright 2006 VMware, Inc.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
- * distribute, sub license, and/or sell copies of the Software, and to
+ * distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
* OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
* ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
* TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
- *
- **************************************************************************/
-
+ */
#include "main/enums.h"
#include "main/imports.h"
#include "main/context.h"
#include "main/teximage.h"
#include "main/image.h"
+#include "main/condrender.h"
+#include "util/hash_table.h"
+#include "util/set.h"
#include "swrast/swrast.h"
#include "drivers/common/meta.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
-#include "intel_regions.h"
+#include "intel_image.h"
#include "intel_screen.h"
#include "intel_tex.h"
#include "brw_context.h"
{
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
- ASSERT(irb);
+ assert(irb);
intel_miptree_release(&irb->mt);
+ intel_miptree_release(&irb->singlesample_mt);
_mesa_delete_renderbuffer(ctx, rb);
}
+/**
+ * \brief Downsample a winsys renderbuffer from mt to singlesample_mt.
+ *
+ * If the miptree needs no downsample, then skip.
+ */
+void
+intel_renderbuffer_downsample(struct brw_context *brw,
+ struct intel_renderbuffer *irb)
+{
+ if (!irb->need_downsample)
+ return;
+ intel_miptree_updownsample(brw, irb->mt, irb->singlesample_mt);
+ irb->need_downsample = false;
+}
+
+/**
+ * \brief Upsample a winsys renderbuffer from singlesample_mt to mt.
+ *
+ * The upsample is done unconditionally.
+ */
+void
+intel_renderbuffer_upsample(struct brw_context *brw,
+ struct intel_renderbuffer *irb)
+{
+ assert(!irb->need_downsample);
+
+ intel_miptree_updownsample(brw, irb->singlesample_mt, irb->mt);
+}
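
To make the intended protocol concrete, here is a minimal sketch of how the pair is meant to bracket CPU access to a multisampled winsys buffer. The helper is hypothetical and not part of the patch; the real flow lives in intel_map_renderbuffer()/intel_unmap_renderbuffer() below.

/* Hypothetical helper, for illustration only: resolve before CPU access,
 * and upsample afterwards iff the CPU wrote anything.
 */
static void
access_msaa_rb_on_cpu(struct brw_context *brw,
                      struct intel_renderbuffer *irb,
                      bool will_write)
{
   /* No-op unless irb->need_downsample is set. */
   intel_renderbuffer_downsample(brw, irb);

   /* ... CPU reads/writes irb->singlesample_mt here ... */

   if (will_write)
      intel_renderbuffer_upsample(brw, irb);
}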
+
/**
* \see dd_function_table::MapRenderbuffer
*/
struct brw_context *brw = brw_context(ctx);
struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+ struct intel_mipmap_tree *mt;
void *map;
- int stride;
+ ptrdiff_t stride;
if (srb->Buffer) {
/* this is a malloc'd renderbuffer (accum buffer), not an irb */
intel_prepare_render(brw);
+ /* The MapRenderbuffer API should always return a single-sampled mapping.
+ * The only case where we are asked to map a multisampled RB is
+ * glReadPixels() (or swrast paths like glCopyTexImage()) reading from a
+ * window-system MSAA buffer, where GL expects an automatic resolve to
+ * happen.
+ *
+ * If it's a color miptree, there is a ->singlesample_mt which wraps the
+ * actual window-system renderbuffer (which we may resolve to at any time),
+ * while the miptree itself is our driver-private allocation. If it's a
+ * depth or stencil miptree, we have a private MSAA buffer and no shared
+ * singlesample buffer, and since we don't expect anybody to ever actually
+ * resolve it, we just make a temporary singlesample buffer when we have
+ * to.
+ */
+ if (rb->NumSamples > 1) {
+ if (!irb->singlesample_mt) {
+ irb->singlesample_mt =
+ intel_miptree_create_for_renderbuffer(brw, irb->mt->format,
+ rb->Width, rb->Height,
+ 0 /*num_samples*/);
+ if (!irb->singlesample_mt)
+ goto fail;
+ irb->singlesample_mt_is_tmp = true;
+ irb->need_downsample = true;
+ }
+
+ intel_renderbuffer_downsample(brw, irb);
+ mt = irb->singlesample_mt;
+
+ irb->need_map_upsample = mode & GL_MAP_WRITE_BIT;
+ } else {
+ mt = irb->mt;
+ }
+
/* For a window-system renderbuffer, the mapping we receive is upside-down
* relative to GL. So we ask for the vertically-flipped rectangle, and then
* return a pointer to the bottom of it with a negative stride.
y = rb->Height - y - h;
}
- intel_miptree_map(brw, irb->mt, irb->mt_level, irb->mt_layer,
+ intel_miptree_map(brw, mt, irb->mt_level, irb->mt_layer,
x, y, w, h, mode, &map, &stride);
if (rb->Name == 0) {
stride = -stride;
}
- DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%d\n",
- __FUNCTION__, rb->Name, _mesa_get_format_name(rb->Format),
+ DBG("%s: rb %d (%s) mt mapped: (%d, %d) (%dx%d) -> %p/%"PRIdPTR"\n",
+ __func__, rb->Name, _mesa_get_format_name(rb->Format),
x, y, w, h, map, stride);
*out_map = map;
*out_stride = stride;
+ return;
+
+fail:
+ *out_map = NULL;
+ *out_stride = 0;
}
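
Since the negative-stride convention is easy to get wrong, here is a hedged consumer-side sketch; 'map', 'stride', and 'h' stand in for the values returned through the out parameters above.

/* For a winsys buffer (rb->Name == 0), 'map' points at the bottom row in
 * memory, which is GL row 0, and 'stride' is negative; the same row
 * arithmetic works for either sign of the stride.
 */
for (int row = 0; row < h; row++) {
   const uint8_t *src = (const uint8_t *)map + (ptrdiff_t)row * stride;
   /* 'src' now addresses GL row 'row' of the mapped rectangle. */
}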
/**
struct brw_context *brw = brw_context(ctx);
struct swrast_renderbuffer *srb = (struct swrast_renderbuffer *)rb;
struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+ struct intel_mipmap_tree *mt;
- DBG("%s: rb %d (%s)\n", __FUNCTION__,
+ DBG("%s: rb %d (%s)\n", __func__,
rb->Name, _mesa_get_format_name(rb->Format));
if (srb->Buffer) {
return;
}
- intel_miptree_unmap(brw, irb->mt, irb->mt_level, irb->mt_layer);
+ if (rb->NumSamples > 1) {
+ mt = irb->singlesample_mt;
+ } else {
+ mt = irb->mt;
+ }
+
+ intel_miptree_unmap(brw, mt, irb->mt_level, irb->mt_layer);
+
+ if (irb->need_map_upsample) {
+ intel_renderbuffer_upsample(brw, irb);
+ irb->need_map_upsample = false;
+ }
+
+ if (irb->singlesample_mt_is_tmp)
+ intel_miptree_release(&irb->singlesample_mt);
}
return quantized_samples;
}
-
-/**
- * Called via glRenderbufferStorageEXT() to set the format and allocate
- * storage for a user-created renderbuffer.
- */
-static GLboolean
-intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
- GLenum internalFormat,
- GLuint width, GLuint height)
+static mesa_format
+intel_renderbuffer_format(struct gl_context * ctx, GLenum internalFormat)
{
struct brw_context *brw = brw_context(ctx);
- struct intel_screen *screen = brw->intelScreen;
- struct intel_renderbuffer *irb = intel_renderbuffer(rb);
- rb->NumSamples = intel_quantize_num_samples(screen, rb->NumSamples);
switch (internalFormat) {
default:
* except they're less useful because you can't texture with
* them.
*/
- rb->Format = ctx->Driver.ChooseTextureFormat(ctx, GL_TEXTURE_2D,
- internalFormat,
- GL_NONE, GL_NONE);
+ return ctx->Driver.ChooseTextureFormat(ctx, GL_TEXTURE_2D,
+ internalFormat,
+ GL_NONE, GL_NONE);
- break;
case GL_STENCIL_INDEX:
case GL_STENCIL_INDEX1_EXT:
case GL_STENCIL_INDEX16_EXT:
/* These aren't actual texture formats, so force them here. */
if (brw->has_separate_stencil) {
- rb->Format = MESA_FORMAT_S_UINT8;
+ return MESA_FORMAT_S_UINT8;
} else {
assert(!brw->must_use_separate_stencil);
- rb->Format = MESA_FORMAT_Z24_UNORM_S8_UINT;
+ return MESA_FORMAT_Z24_UNORM_S8_UINT;
}
- break;
}
+}
+
+static GLboolean
+intel_alloc_private_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
+ GLenum internalFormat,
+ GLuint width, GLuint height)
+{
+ struct brw_context *brw = brw_context(ctx);
+ struct intel_screen *screen = brw->intelScreen;
+ struct intel_renderbuffer *irb = intel_renderbuffer(rb);
+
+ assert(rb->Format != MESA_FORMAT_NONE);
+ rb->NumSamples = intel_quantize_num_samples(screen, rb->NumSamples);
rb->Width = width;
rb->Height = height;
rb->_BaseFormat = _mesa_base_fbo_format(ctx, internalFormat);
intel_miptree_release(&irb->mt);
- DBG("%s: %s: %s (%dx%d)\n", __FUNCTION__,
- _mesa_lookup_enum_by_nr(internalFormat),
+ DBG("%s: %s: %s (%dx%d)\n", __func__,
+ _mesa_enum_to_string(internalFormat),
_mesa_get_format_name(rb->Format), width, height);
if (width == 0 || height == 0)
if (!irb->mt)
return false;
+ irb->layer_count = 1;
+
return true;
}
+/**
+ * Called via glRenderbufferStorageEXT() to set the format and allocate
+ * storage for a user-created renderbuffer.
+ */
+static GLboolean
+intel_alloc_renderbuffer_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
+ GLenum internalFormat,
+ GLuint width, GLuint height)
+{
+ rb->Format = intel_renderbuffer_format(ctx, internalFormat);
+ return intel_alloc_private_renderbuffer_storage(ctx, rb, internalFormat, width, height);
+}
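
For reference, an ordinary user renderbuffer reaches intel_alloc_renderbuffer_storage() through the standard GL calls below (client-side sketch, nothing patch-specific); winsys buffers instead use the private variant because the DRI layer has already chosen rb->Format.

GLuint rbo;
glGenRenderbuffers(1, &rbo);
glBindRenderbuffer(GL_RENDERBUFFER, rbo);
/* internalFormat is resolved by intel_renderbuffer_format() and the sample
 * count is quantized by intel_quantize_num_samples(). */
glRenderbufferStorageMultisample(GL_RENDERBUFFER, 4, GL_RGBA8, 256, 256);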
static void
intel_image_target_renderbuffer_storage(struct gl_context *ctx,
return;
}
- /* Buffers originating from outside are for read-only. */
- if (image->dma_buf_imported) {
- _mesa_error(ctx, GL_INVALID_OPERATION,
- "glEGLImageTargetRenderbufferStorage(dma buffers are read-only)");
- return;
- }
-
/* __DRIimage is opaque to the core so it has to be checked here */
switch (image->format) {
case MESA_FORMAT_R8G8B8A8_UNORM:
irb = intel_renderbuffer(rb);
intel_miptree_release(&irb->mt);
+
+ /* Disable creation of the miptree's aux buffers because the driver exposes
+ * no EGL API to manage them. That is, there is no API for resolving the aux
+ * buffer's content to the main buffer nor for invalidating the aux buffer's
+ * content.
+ */
irb->mt = intel_miptree_create_for_bo(brw,
- image->region->bo,
+ image->bo,
image->format,
image->offset,
- image->region->width,
- image->region->height,
- image->region->pitch,
- image->region->tiling);
+ image->width,
+ image->height,
+ 1,
+ image->pitch,
+ MIPTREE_LAYOUT_DISABLE_AUX);
if (!irb->mt)
return;
rb->InternalFormat = image->internal_format;
- rb->Width = image->region->width;
- rb->Height = image->region->height;
+ rb->Width = image->width;
+ rb->Height = image->height;
rb->Format = image->format;
- rb->_BaseFormat = _mesa_base_fbo_format(ctx, image->internal_format);
+ rb->_BaseFormat = _mesa_get_format_base_format(image->format);
rb->NeedsFinishRenderTexture = true;
+ irb->layer_count = 1;
}
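
A hedged client-side sketch of the path that reaches this hook; 'img' is assumed to come from a prior eglCreateImageKHR() call and is not created here.

/* GL_OES_EGL_image: turn an existing EGLImage into renderbuffer storage.
 * Error checking omitted.
 */
GLuint rbo;
glGenRenderbuffers(1, &rbo);
glBindRenderbuffer(GL_RENDERBUFFER, rbo);
glEGLImageTargetRenderbufferStorageOES(GL_RENDERBUFFER, (GLeglImageOES) img);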
/**
intel_alloc_window_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
GLenum internalFormat, GLuint width, GLuint height)
{
- ASSERT(rb->Name == 0);
+ assert(rb->Name == 0);
rb->Width = width;
rb->Height = height;
rb->InternalFormat = internalFormat;
intel_nop_alloc_storage(struct gl_context * ctx, struct gl_renderbuffer *rb,
GLenum internalFormat, GLuint width, GLuint height)
{
- _mesa_problem(ctx, "intel_op_alloc_storage should never be called.");
+ _mesa_problem(ctx, "intel_nop_alloc_storage should never be called.");
return false;
}
}
rb = &irb->Base.Base;
+ irb->layer_count = 1;
_mesa_init_renderbuffer(rb, 0);
rb->ClassID = INTEL_RB_CLASS;
struct intel_renderbuffer *irb;
irb = intel_create_renderbuffer(format, num_samples);
- irb->Base.Base.AllocStorage = intel_alloc_renderbuffer_storage;
+ irb->Base.Base.AllocStorage = intel_alloc_private_renderbuffer_storage;
return irb;
}
static bool
intel_renderbuffer_update_wrapper(struct brw_context *brw,
struct intel_renderbuffer *irb,
- struct gl_texture_image *image,
- uint32_t layer)
+ struct gl_texture_image *image,
+ uint32_t layer,
+ bool layered)
{
struct gl_renderbuffer *rb = &irb->Base.Base;
struct intel_texture_image *intel_image = intel_texture_image(image);
rb->AllocStorage = intel_nop_alloc_storage;
+ /* adjust for texture view parameters */
+ layer += image->TexObject->MinLayer;
+ level += image->TexObject->MinLevel;
+
intel_miptree_check_level_layer(mt, level, layer);
irb->mt_level = level;
+ int layer_multiplier;
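+ /* In UMS/CMS MSAA layouts each logical array layer occupies num_samples
+ * consecutive physical slices, so logical layer indices (and, below,
+ * layer counts) must be scaled by the layout's multiplier.
+ */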
switch (mt->msaa_layout) {
case INTEL_MSAA_LAYOUT_UMS:
case INTEL_MSAA_LAYOUT_CMS:
- irb->mt_layer = layer * mt->num_samples;
+ layer_multiplier = mt->num_samples;
break;
default:
- irb->mt_layer = layer;
+ layer_multiplier = 1;
+ }
+
+ irb->mt_layer = layer_multiplier * layer;
+
+ if (!layered) {
+ irb->layer_count = 1;
+ } else if (image->TexObject->NumLayers > 0) {
+ irb->layer_count = image->TexObject->NumLayers;
+ } else {
+ irb->layer_count = mt->level[level].depth / layer_multiplier;
}
intel_miptree_reference(&irb->mt, mt);
intel_renderbuffer_set_draw_offset(irb);
- if (mt->hiz_mt == NULL && brw_is_hiz_depth_format(brw, rb->Format)) {
+ if (intel_miptree_wants_hiz_buffer(brw, mt)) {
intel_miptree_alloc_hiz(brw, mt);
- if (!mt->hiz_mt)
+ if (!mt->hiz_buf)
return false;
}
intel_miptree_check_level_layer(mt, att->TextureLevel, layer);
- if (!intel_renderbuffer_update_wrapper(brw, irb, image, layer)) {
+ if (!intel_renderbuffer_update_wrapper(brw, irb, image, layer, att->Layered)) {
_swrast_render_texture(ctx, fb, att);
return;
}
}
-/**
- * Called by Mesa when rendering to a texture is done.
- */
-static void
-intel_finish_render_texture(struct gl_context * ctx, struct gl_renderbuffer *rb)
-{
- struct brw_context *brw = brw_context(ctx);
-
- DBG("Finish render %s texture\n", _mesa_get_format_name(rb->Format));
-
- /* Since we've (probably) rendered to the texture and will (likely) use
- * it in the texture domain later on in this batchbuffer, flush the
- * batch. Once again, we wish for a domain tracker in libdrm to cover
- * usage inside of a batchbuffer like GEM does in the kernel.
- */
- intel_batchbuffer_emit_mi_flush(brw);
-}
-
#define fbo_incomplete(fb, ...) do { \
static GLuint msg_id = 0; \
if (unlikely(ctx->Const.ContextFlags & GL_CONTEXT_FLAG_DEBUG_BIT)) { \
_mesa_gl_debug(ctx, &msg_id, \
+ MESA_DEBUG_SOURCE_API, \
MESA_DEBUG_TYPE_OTHER, \
MESA_DEBUG_SEVERITY_MEDIUM, \
__VA_ARGS__); \
struct intel_renderbuffer *stencilRb =
intel_get_renderbuffer(fb, BUFFER_STENCIL);
struct intel_mipmap_tree *depth_mt = NULL, *stencil_mt = NULL;
- int i;
+ unsigned i;
- DBG("%s() on fb %p (%s)\n", __FUNCTION__,
+ DBG("%s() on fb %p (%s)\n", __func__,
fb, (fb == ctx->DrawBuffer ? "drawbuffer" :
(fb == ctx->ReadBuffer ? "readbuffer" : "other buffer")));
}
if (depth_mt && stencil_mt) {
- if (brw->gen >= 7) {
- /* For gen >= 7, we are using the lod/minimum-array-element fields
- * and supportting layered rendering. This means that we must restrict
+ if (brw->gen >= 6) {
+ /* For gen >= 6, we are using the lod/minimum-array-element fields
+ * and supporting layered rendering. This means that we must restrict
* the depth & stencil attachments to match in various more restrictive
* ways. (width, height, depth, LOD and layer)
*/
}
}
- for (i = 0; i < Elements(fb->Attachment); i++) {
+ for (i = 0; i < ARRAY_SIZE(fb->Attachment); i++) {
struct gl_renderbuffer *rb;
struct intel_renderbuffer *irb;
*/
static GLbitfield
intel_blit_framebuffer_with_blitter(struct gl_context *ctx,
+ const struct gl_framebuffer *readFb,
+ const struct gl_framebuffer *drawFb,
GLint srcX0, GLint srcY0,
GLint srcX1, GLint srcY1,
GLint dstX0, GLint dstY0,
intel_prepare_render(brw);
if (mask & GL_COLOR_BUFFER_BIT) {
- GLint i;
- const struct gl_framebuffer *drawFb = ctx->DrawBuffer;
- const struct gl_framebuffer *readFb = ctx->ReadBuffer;
+ unsigned i;
struct gl_renderbuffer *src_rb = readFb->_ColorReadBuffer;
struct intel_renderbuffer *src_irb = intel_renderbuffer(src_rb);
* results are undefined if any destination pixels have a dependency on
* source pixels.
*/
- for (i = 0; i < ctx->DrawBuffer->_NumColorDrawBuffers; i++) {
- struct gl_renderbuffer *dst_rb = ctx->DrawBuffer->_ColorDrawBuffers[i];
+ for (i = 0; i < drawFb->_NumColorDrawBuffers; i++) {
+ struct gl_renderbuffer *dst_rb = drawFb->_ColorDrawBuffers[i];
struct intel_renderbuffer *dst_irb = intel_renderbuffer(dst_rb);
if (!dst_irb) {
static void
intel_blit_framebuffer(struct gl_context *ctx,
+ struct gl_framebuffer *readFb,
+ struct gl_framebuffer *drawFb,
GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
GLbitfield mask, GLenum filter)
{
- mask = brw_blorp_framebuffer(brw_context(ctx),
+ struct brw_context *brw = brw_context(ctx);
+
+ /* Page 679 of OpenGL 4.4 spec says:
+ * "Added BlitFramebuffer to commands affected by conditional rendering in
+ * section 10.10 (Bug 9562)."
+ */
+ if (!_mesa_check_conditional_render(ctx))
+ return;
+
+ mask = brw_blorp_framebuffer(brw, readFb, drawFb,
srcX0, srcY0, srcX1, srcY1,
dstX0, dstY0, dstX1, dstY1,
mask, filter);
if (mask == 0x0)
return;
+ mask = _mesa_meta_BlitFramebuffer(ctx, readFb, drawFb,
+ srcX0, srcY0, srcX1, srcY1,
+ dstX0, dstY0, dstX1, dstY1,
+ mask, filter);
+ if (mask == 0x0)
+ return;
+
+ if (brw->gen >= 8 && (mask & GL_STENCIL_BUFFER_BIT)) {
+ brw_meta_fbo_stencil_blit(brw, readFb, drawFb,
+ srcX0, srcY0, srcX1, srcY1,
+ dstX0, dstY0, dstX1, dstY1);
+ mask &= ~GL_STENCIL_BUFFER_BIT;
+ if (mask == 0x0)
+ return;
+ }
+
/* Try using the BLT engine. */
- mask = intel_blit_framebuffer_with_blitter(ctx,
+ mask = intel_blit_framebuffer_with_blitter(ctx, readFb, drawFb,
srcX0, srcY0, srcX1, srcY1,
dstX0, dstY0, dstX1, dstY1,
mask, filter);
if (mask == 0x0)
return;
-
- _mesa_meta_BlitFramebuffer(ctx,
- srcX0, srcY0, srcX1, srcY1,
- dstX0, dstY0, dstX1, dstY1,
- mask, filter);
+ _swrast_BlitFramebuffer(ctx, readFb, drawFb,
+ srcX0, srcY0, srcX1, srcY1,
+ dstX0, dstY0, dstX1, dstY1,
+ mask, filter);
}
/**
- * This is a no-op except on multisample buffers shared with DRI2.
+ * Gen4-5 implementation of glBlitFramebuffer().
+ *
+ * Tries BLT, Meta, then swrast.
+ *
+ * Gen4-5 have a single ring for both 3D and BLT operations, so there are no
+ * inter-ring synchronization issues like on Gen6+; the BLT is apparently
+ * faster than the 3D pipeline here. Original Gen4 also has to rebase and copy
+ * miptree slices in order to render to unaligned locations.
*/
-void
-intel_renderbuffer_set_needs_downsample(struct intel_renderbuffer *irb)
+static void
+gen4_blit_framebuffer(struct gl_context *ctx,
+ struct gl_framebuffer *readFb,
+ struct gl_framebuffer *drawFb,
+ GLint srcX0, GLint srcY0, GLint srcX1, GLint srcY1,
+ GLint dstX0, GLint dstY0, GLint dstX1, GLint dstY1,
+ GLbitfield mask, GLenum filter)
{
- if (irb->mt && irb->mt->singlesample_mt)
- irb->mt->need_downsample = true;
+ /* Page 679 of OpenGL 4.4 spec says:
+ * "Added BlitFramebuffer to commands affected by conditional rendering in
+ * section 10.10 (Bug 9562)."
+ */
+ if (!_mesa_check_conditional_render(ctx))
+ return;
+
+ mask = intel_blit_framebuffer_with_blitter(ctx, readFb, drawFb,
+ srcX0, srcY0, srcX1, srcY1,
+ dstX0, dstY0, dstX1, dstY1,
+ mask, filter);
+ if (mask == 0x0)
+ return;
+
+ mask = _mesa_meta_BlitFramebuffer(ctx, readFb, drawFb,
+ srcX0, srcY0, srcX1, srcY1,
+ dstX0, dstY0, dstX1, dstY1,
+ mask, filter);
+ if (mask == 0x0)
+ return;
+
+ _swrast_BlitFramebuffer(ctx, readFb, drawFb,
+ srcX0, srcY0, srcX1, srcY1,
+ dstX0, dstY0, dstX1, dstY1,
+ mask, filter);
}
/**
bool
intel_renderbuffer_has_hiz(struct intel_renderbuffer *irb)
{
- return intel_miptree_slice_has_hiz(irb->mt, irb->mt_level, irb->mt_layer);
+ return intel_miptree_level_has_hiz(irb->mt, irb->mt_level);
}
bool
struct intel_mipmap_tree *new_mt;
int width, height, depth;
+ uint32_t layout_flags = MIPTREE_LAYOUT_ACCELERATED_UPLOAD |
+ MIPTREE_LAYOUT_TILING_ANY;
+
intel_miptree_get_dimensions_for_image(rb->TexImage, &width, &height, &depth);
new_mt = intel_miptree_create(brw, rb->TexImage->TexObject->Target,
intel_image->base.Base.Level,
intel_image->base.Base.Level,
width, height, depth,
- true,
irb->mt->num_samples,
- INTEL_MIPTREE_TILING_ANY);
+ layout_flags);
- if (brw_is_hiz_depth_format(brw, new_mt->format)) {
+ if (intel_miptree_wants_hiz_buffer(brw, new_mt)) {
intel_miptree_alloc_hiz(brw, new_mt);
}
intel_miptree_release(&new_mt);
}
+void
+brw_render_cache_set_clear(struct brw_context *brw)
+{
+ struct set_entry *entry;
+
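+ /* Removing entries while iterating is safe here: _mesa_set_remove() only
+ * marks the entry deleted, it does not rehash the table.
+ */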
+ set_foreach(brw->render_cache, entry) {
+ _mesa_set_remove(brw->render_cache, entry);
+ }
+}
+
+void
+brw_render_cache_set_add_bo(struct brw_context *brw, drm_intel_bo *bo)
+{
+ _mesa_set_add(brw->render_cache, bo);
+}
+
+/**
+ * Emits an appropriate flush for a BO if it has been rendered to within the
+ * same batchbuffer as a read that's about to be emitted.
+ *
+ * The GPU has separate, incoherent caches for the render cache and the
+ * sampler cache, along with other caches. Usually data in the different
+ * caches don't interact (e.g. we don't render to our driver-generated
+ * immediate constant data), but for render-to-texture in FBOs we definitely
+ * do. When a batchbuffer is flushed, the kernel will ensure that everything
+ * necessary is flushed before another use of that BO, but for reuse from
+ * different caches within a batchbuffer, it's all our responsibility.
+ */
+void
+brw_render_cache_set_check_flush(struct brw_context *brw, drm_intel_bo *bo)
+{
+ if (!_mesa_set_search(brw->render_cache, bo))
+ return;
+
+ brw_emit_mi_flush(brw);
+}
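+
A minimal sketch of how the three entry points pair up; the call sites shown are illustrative (the real ones are the draw-emit, texture-validate, and batch-flush paths).

/* After a draw call, record each BO the render pipe may have written: */
brw_render_cache_set_add_bo(brw, irb->mt->bo);

/* Before sampling from a BO, flush if it was rendered to in this batch: */
brw_render_cache_set_check_flush(brw, tex_mt->bo);

/* Once the batch is submitted, the kernel handles coherency, so forget: */
brw_render_cache_set_clear(brw);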
+
/**
* Do one-time context initializations related to GL_EXT_framebuffer_object.
* Hook in device driver functions.
dd->MapRenderbuffer = intel_map_renderbuffer;
dd->UnmapRenderbuffer = intel_unmap_renderbuffer;
dd->RenderTexture = intel_render_texture;
- dd->FinishRenderTexture = intel_finish_render_texture;
dd->ValidateFramebuffer = intel_validate_framebuffer;
- dd->BlitFramebuffer = intel_blit_framebuffer;
+ if (brw->gen >= 6)
+ dd->BlitFramebuffer = intel_blit_framebuffer;
+ else
+ dd->BlitFramebuffer = gen4_blit_framebuffer;
dd->EGLImageTargetRenderbufferStorage =
intel_image_target_renderbuffer_storage;
+
+ brw->render_cache = _mesa_set_create(brw, _mesa_hash_pointer,
+ _mesa_key_pointer_equal);
}