#include "swrast/swrast.h"
#include "drivers/common/meta.h"
-#include "intel_context.h"
+#include "intel_batchbuffer.h"
#include "intel_blit.h"
-#include "intel_clear.h"
#include "intel_fbo.h"
+#include "intel_mipmap_tree.h"
#include "intel_regions.h"
+#include "brw_context.h"
+#include "brw_blorp.h"
+
#define FILE_DEBUG_FLAG DEBUG_BLIT
static const char *buffer_names[] = {
}
}
+/**
+ * Returns true if the scissor is a noop (cuts out nothing).
+ */
+static bool
+noop_scissor(struct gl_context *ctx, struct gl_framebuffer *fb)
+{
+ return ctx->Scissor.X <= 0 &&
+ ctx->Scissor.Y <= 0 &&
+ ctx->Scissor.Width >= fb->Width &&
+ ctx->Scissor.Height >= fb->Height;
+}
+
+/**
+ * Implements fast depth clears on gen6+.
+ *
+ * Fast clears basically work by setting a flag in each of the subspans
+ * represented in the HiZ buffer that says "When you need the depth values for
+ * this subspan, it's the hardware's current clear value." Then later rendering
+ * can just use the static clear value instead of referencing memory.
+ *
+ * The tricky part of the implementation is that you have to have the clear
+ * value that was used on the depth buffer in place for all further rendering,
+ * at least until a resolve to the real depth buffer happens.
+ */
+static bool
+brw_fast_clear_depth(struct gl_context *ctx)
+{
+ struct brw_context *brw = brw_context(ctx);
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+ struct intel_renderbuffer *depth_irb =
+ intel_get_renderbuffer(fb, BUFFER_DEPTH);
+ struct intel_mipmap_tree *mt = depth_irb->mt;
+ struct gl_renderbuffer_attachment *depth_att = &fb->Attachment[BUFFER_DEPTH];
+
+ if (brw->gen < 6)
+ return false;
+
+ if (!intel_renderbuffer_has_hiz(depth_irb))
+ return false;
+
+ /* We only handle full buffer clears -- otherwise you'd have to track whether
+ * a previous clear had happened at a different clear value and resolve it
+ * first.
+ */
+ if (ctx->Scissor.Enabled && !noop_scissor(ctx, fb)) {
+ perf_debug("Failed to fast clear depth due to scissor being enabled. "
+ "Possible 5%% performance win if avoided.\n");
+ return false;
+ }
+
+ uint32_t depth_clear_value;
+ switch (mt->format) {
+ case MESA_FORMAT_Z32_FLOAT_X24S8:
+ case MESA_FORMAT_S8_Z24:
+ /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
+ *
+ * "[DevSNB+]: Several cases exist where Depth Buffer Clear cannot be
+ * enabled (the legacy method of clearing must be performed):
+ *
+ * - If the depth buffer format is D32_FLOAT_S8X24_UINT or
+ * D24_UNORM_S8_UINT.
+ */
+ return false;
+
+ case MESA_FORMAT_Z32_FLOAT:
+ depth_clear_value = float_as_int(ctx->Depth.Clear);
+ break;
+
+ case MESA_FORMAT_Z16:
+ /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
+ *
+ * "[DevSNB+]: Several cases exist where Depth Buffer Clear cannot be
+ * enabled (the legacy method of clearing must be performed):
+ *
+ * - DevSNB{W/A}]: When depth buffer format is D16_UNORM and the
+ * width of the map (LOD0) is not multiple of 16, fast clear
+ * optimization must be disabled.
+ */
+ if (brw->gen == 6 && (mt->level[depth_irb->mt_level].width % 16) != 0)
+ return false;
+ /* FALLTHROUGH */
+
+ default:
+ depth_clear_value = fb->_DepthMax * ctx->Depth.Clear;
+ break;
+ }
+
+ /* If we're clearing to a new clear value, then we need to resolve any clear
+ * flags out of the HiZ buffer into the real depth buffer.
+ */
+ if (mt->depth_clear_value != depth_clear_value) {
+ intel_miptree_all_slices_resolve_depth(brw, mt);
+ mt->depth_clear_value = depth_clear_value;
+ }
+
+ /* From the Sandy Bridge PRM, volume 2 part 1, page 313:
+ *
+ * "If other rendering operations have preceded this clear, a
+ * PIPE_CONTROL with write cache flush enabled and Z-inhibit disabled
+ * must be issued before the rectangle primitive used for the depth
+ * buffer clear operation.
+ */
+ intel_batchbuffer_emit_mi_flush(brw);
+
+ intel_hiz_exec(brw, mt, depth_irb->mt_level, depth_irb->mt_layer,
+ GEN6_HIZ_OP_DEPTH_CLEAR);
+
+ if (brw->gen == 6) {
+ /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
+ *
+ * "DevSNB, DevSNB-B{W/A}]: Depth buffer clear pass must be followed
+ * by a PIPE_CONTROL command with DEPTH_STALL bit set and Then
+ * followed by Depth FLUSH'
+ */
+ intel_batchbuffer_emit_mi_flush(brw);
+ }
+
+ /* Now, the HiZ buffer contains data that needs to be resolved to the depth
+ * buffer.
+ */
+ intel_renderbuffer_att_set_needs_depth_resolve(depth_att);
+
+ return true;
+}
+
/**
* Called by ctx->Driver.Clear.
*/
static void
-intelClear(struct gl_context *ctx, GLbitfield mask)
+brw_clear(struct gl_context *ctx, GLbitfield mask)
{
- struct intel_context *intel = intel_context(ctx);
+ struct brw_context *brw = brw_context(ctx);
+ struct gl_framebuffer *fb = ctx->DrawBuffer;
+ bool partial_clear = ctx->Scissor.Enabled && !noop_scissor(ctx, fb);
if (!_mesa_check_conditional_render(ctx))
return;
if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
- intel->front_buffer_dirty = true;
+ brw->front_buffer_dirty = true;
+ }
+
+ intel_prepare_render(brw);
+ brw_workaround_depthstencil_alignment(brw, partial_clear ? 0 : mask);
+
+ if (mask & BUFFER_BIT_DEPTH) {
+ if (brw_fast_clear_depth(ctx)) {
+ DBG("fast clear: depth\n");
+ mask &= ~BUFFER_BIT_DEPTH;
+ }
+ }
+
+ /* BLORP is currently only supported on Gen6+. */
+ if (brw->gen >= 6) {
+ if (mask & BUFFER_BITS_COLOR) {
+ if (brw_blorp_clear_color(brw, fb, partial_clear)) {
+ debug_mask("blorp color", mask & BUFFER_BITS_COLOR);
+ mask &= ~BUFFER_BITS_COLOR;
+ }
+ }
}
GLbitfield tri_mask = mask & (BUFFER_BITS_COLOR |
if (tri_mask) {
debug_mask("tri", tri_mask);
mask &= ~tri_mask;
- _mesa_meta_glsl_Clear(&intel->ctx, tri_mask);
+
+ if (ctx->API == API_OPENGLES) {
+ _mesa_meta_Clear(&brw->ctx, tri_mask);
+ } else {
+ _mesa_meta_glsl_Clear(&brw->ctx, tri_mask);
+ }
}
/* Any strange buffers get passed off to swrast */
void
intelInitClearFuncs(struct dd_function_table *functions)
{
- functions->Clear = intelClear;
+ functions->Clear = brw_clear;
}