i965: Track that the stencil data was updated when clearing
src/mesa/drivers/dri/i965/brw_clear.c
/*
 * Copyright 2003 VMware, Inc.
 * Copyright 2009, 2012 Intel Corporation.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "main/mtypes.h"
#include "main/condrender.h"
#include "swrast/swrast.h"
#include "drivers/common/meta.h"

#include "intel_batchbuffer.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"

#include "brw_context.h"
#include "brw_blorp.h"

#define FILE_DEBUG_FLAG DEBUG_BLIT

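/* Human-readable names for the gl_buffer_index values, used by debug_mask()
 * below to print which buffers a clear touches.
 */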
static const char *buffer_names[] = {
   [BUFFER_FRONT_LEFT] = "front",
   [BUFFER_BACK_LEFT] = "back",
   [BUFFER_FRONT_RIGHT] = "front right",
   [BUFFER_BACK_RIGHT] = "back right",
   [BUFFER_DEPTH] = "depth",
   [BUFFER_STENCIL] = "stencil",
   [BUFFER_ACCUM] = "accum",
   [BUFFER_AUX0] = "aux0",
   [BUFFER_COLOR0] = "color0",
   [BUFFER_COLOR1] = "color1",
   [BUFFER_COLOR2] = "color2",
   [BUFFER_COLOR3] = "color3",
   [BUFFER_COLOR4] = "color4",
   [BUFFER_COLOR5] = "color5",
   [BUFFER_COLOR6] = "color6",
   [BUFFER_COLOR7] = "color7",
};

static void
debug_mask(const char *name, GLbitfield mask)
{
   GLuint i;

   if (unlikely(INTEL_DEBUG & DEBUG_BLIT)) {
      DBG("%s clear:", name);
      for (i = 0; i < BUFFER_COUNT; i++) {
         if (mask & (1 << i))
            DBG(" %s", buffer_names[i]);
      }
      DBG("\n");
   }
}

/**
 * Returns true if the scissor is a noop (cuts out nothing).
 */
static bool
noop_scissor(struct gl_context *ctx, struct gl_framebuffer *fb)
{
   return ctx->Scissor.ScissorArray[0].X <= 0 &&
          ctx->Scissor.ScissorArray[0].Y <= 0 &&
          ctx->Scissor.ScissorArray[0].Width >= fb->Width &&
          ctx->Scissor.ScissorArray[0].Height >= fb->Height;
}

/**
 * Implements fast depth clears on gen6+.
 *
 * Fast clears basically work by setting a flag in each of the subspans
 * represented in the HiZ buffer that says "When you need the depth values for
 * this subspan, it's the hardware's current clear value." Then later rendering
 * can just use the static clear value instead of referencing memory.
 *
 * The tricky part of the implementation is that you have to have the clear
 * value that was used on the depth buffer in place for all further rendering,
 * at least until a resolve to the real depth buffer happens.
 */
static bool
brw_fast_clear_depth(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *depth_irb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_mipmap_tree *mt = depth_irb->mt;
   struct gl_renderbuffer_attachment *depth_att = &fb->Attachment[BUFFER_DEPTH];

   if (brw->gen < 6)
      return false;

   if (!intel_renderbuffer_has_hiz(depth_irb))
      return false;

   /* We only handle full buffer clears -- otherwise you'd have to track whether
    * a previous clear had happened at a different clear value and resolve it
    * first.
    */
   if ((ctx->Scissor.EnableFlags & 1) && !noop_scissor(ctx, fb)) {
      perf_debug("Failed to fast clear %dx%d depth because of scissors. "
                 "Possible 5%% performance win if avoided.\n",
                 mt->logical_width0, mt->logical_height0);
      return false;
   }

   uint32_t depth_clear_value;
   switch (mt->format) {
   case MESA_FORMAT_Z32_FLOAT_S8X24_UINT:
   case MESA_FORMAT_Z24_UNORM_S8_UINT:
      /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
       *
       *     "[DevSNB+]: Several cases exist where Depth Buffer Clear cannot be
       *      enabled (the legacy method of clearing must be performed):
       *
       *      - If the depth buffer format is D32_FLOAT_S8X24_UINT or
       *        D24_UNORM_S8_UINT.
       */
      return false;

   case MESA_FORMAT_Z_FLOAT32:
      depth_clear_value = float_as_int(ctx->Depth.Clear);
      break;

   case MESA_FORMAT_Z_UNORM16:
      /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
       *
       *     "[DevSNB+]: Several cases exist where Depth Buffer Clear cannot be
       *      enabled (the legacy method of clearing must be performed):
       *
       *      - DevSNB{W/A}]: When depth buffer format is D16_UNORM and the
       *        width of the map (LOD0) is not multiple of 16, fast clear
       *        optimization must be disabled.
       */
      if (brw->gen == 6 &&
          (minify(mt->physical_width0,
                  depth_irb->mt_level - mt->first_level) % 16) != 0)
         return false;
      /* FALLTHROUGH */

   default:
      if (brw->gen >= 8)
         depth_clear_value = float_as_int(ctx->Depth.Clear);
      else
         depth_clear_value = fb->_DepthMax * ctx->Depth.Clear;
      break;
   }

   /* If we're clearing to a new clear value, then we need to resolve any clear
    * flags out of the HiZ buffer into the real depth buffer.
    */
   if (mt->depth_clear_value != depth_clear_value) {
      intel_miptree_all_slices_resolve_depth(brw, mt);
      mt->depth_clear_value = depth_clear_value;
   }

   /* From the Sandy Bridge PRM, volume 2 part 1, page 313:
    *
    *   "If other rendering operations have preceded this clear, a
    *    PIPE_CONTROL with write cache flush enabled and Z-inhibit disabled
    *    must be issued before the rectangle primitive used for the depth
    *    buffer clear operation.
    */
   brw_emit_mi_flush(brw);

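   /* For layered framebuffers every layer of the depth attachment is cleared;
    * otherwise only the single attached slice is.
    */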
   if (fb->MaxNumLayers > 0) {
      for (unsigned layer = 0; layer < depth_irb->layer_count; layer++) {
         intel_hiz_exec(brw, mt, depth_irb->mt_level,
                        depth_irb->mt_layer + layer,
                        GEN6_HIZ_OP_DEPTH_CLEAR);
      }
   } else {
      intel_hiz_exec(brw, mt, depth_irb->mt_level, depth_irb->mt_layer,
                     GEN6_HIZ_OP_DEPTH_CLEAR);
   }

   if (brw->gen == 6) {
      /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
       *
       *     "DevSNB, DevSNB-B{W/A}]: Depth buffer clear pass must be followed
       *      by a PIPE_CONTROL command with DEPTH_STALL bit set and Then
       *      followed by Depth FLUSH'
       */
      brw_emit_mi_flush(brw);
   }

   /* Now, the HiZ buffer contains data that needs to be resolved to the depth
    * buffer.
    */
   intel_renderbuffer_att_set_needs_depth_resolve(depth_att);

   return true;
}

/**
 * Called by ctx->Driver.Clear.
 */
static void
brw_clear(struct gl_context *ctx, GLbitfield mask)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   bool partial_clear = ctx->Scissor.EnableFlags && !noop_scissor(ctx, fb);

   if (!_mesa_check_conditional_render(ctx))
      return;

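   /* Clears to the front buffer have to reach the window system eventually,
    * so note that the front buffer now has pending changes to flush out.
    */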
   if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
      brw->front_buffer_dirty = true;
   }

   intel_prepare_render(brw);
   brw_workaround_depthstencil_alignment(brw, partial_clear ? 0 : mask);

   if (mask & BUFFER_BIT_DEPTH) {
      if (brw_fast_clear_depth(ctx)) {
         DBG("fast clear: depth\n");
         mask &= ~BUFFER_BIT_DEPTH;
      }
   }

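   /* Clearing the stencil buffer makes any R8 shadow copy of the separate
    * stencil miptree (kept for stencil texturing) stale, so flag it for a
    * re-copy before it is next sampled.
    */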
   if (mask & BUFFER_BIT_STENCIL) {
      struct intel_renderbuffer *stencil_irb =
         intel_get_renderbuffer(fb, BUFFER_STENCIL);
      struct intel_mipmap_tree *mt = stencil_irb->mt;
      if (mt && mt->stencil_mt)
         mt->stencil_mt->r8stencil_needs_update = true;
   }

   /* BLORP is currently only supported on Gen6+. */
   if (brw->gen >= 6 && (mask & BUFFER_BITS_COLOR)) {
      const bool encode_srgb = ctx->Color.sRGBEnabled;
      if (brw_blorp_clear_color(brw, fb, mask, partial_clear, encode_srgb)) {
         debug_mask("blorp color", mask & BUFFER_BITS_COLOR);
         mask &= ~BUFFER_BITS_COLOR;
      }
   }

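   /* Whatever remains of the color/depth/stencil buffers is cleared by
    * drawing a window-aligned quad through the meta paths.
    */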
   GLbitfield tri_mask = mask & (BUFFER_BITS_COLOR |
                                 BUFFER_BIT_STENCIL |
                                 BUFFER_BIT_DEPTH);

   if (tri_mask) {
      debug_mask("tri", tri_mask);
      mask &= ~tri_mask;

      if (ctx->API == API_OPENGLES) {
         _mesa_meta_Clear(&brw->ctx, tri_mask);
      } else {
         _mesa_meta_glsl_Clear(&brw->ctx, tri_mask);
      }
   }

   /* Any strange buffers get passed off to swrast */
   if (mask) {
      debug_mask("swrast", mask);
      _swrast_Clear(ctx, mask);
   }
}


void
intelInitClearFuncs(struct dd_function_table *functions)
{
   functions->Clear = brw_clear;
}