i965: Add and use a single miptree aux_buf field
[mesa.git] / src/mesa/drivers/dri/i965/brw_clear.c
/*
 * Copyright 2003 VMware, Inc.
 * Copyright 2009, 2012 Intel Corporation.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "main/mtypes.h"
#include "main/condrender.h"
#include "swrast/swrast.h"
#include "drivers/common/meta.h"

#include "intel_batchbuffer.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"

#include "brw_context.h"
#include "brw_blorp.h"
#include "brw_defines.h"

#define FILE_DEBUG_FLAG DEBUG_BLIT

static const char *buffer_names[] = {
   [BUFFER_FRONT_LEFT] = "front",
   [BUFFER_BACK_LEFT] = "back",
   [BUFFER_FRONT_RIGHT] = "front right",
   [BUFFER_BACK_RIGHT] = "back right",
   [BUFFER_DEPTH] = "depth",
   [BUFFER_STENCIL] = "stencil",
   [BUFFER_ACCUM] = "accum",
   [BUFFER_AUX0] = "aux0",
   [BUFFER_COLOR0] = "color0",
   [BUFFER_COLOR1] = "color1",
   [BUFFER_COLOR2] = "color2",
   [BUFFER_COLOR3] = "color3",
   [BUFFER_COLOR4] = "color4",
   [BUFFER_COLOR5] = "color5",
   [BUFFER_COLOR6] = "color6",
   [BUFFER_COLOR7] = "color7",
};

static void
debug_mask(const char *name, GLbitfield mask)
{
   GLuint i;

   if (unlikely(INTEL_DEBUG & DEBUG_BLIT)) {
      DBG("%s clear:", name);
      for (i = 0; i < BUFFER_COUNT; i++) {
         if (mask & (1 << i))
            DBG(" %s", buffer_names[i]);
      }
      DBG("\n");
   }
}

/**
 * Returns true if the scissor is a noop (cuts out nothing).
 */
static bool
noop_scissor(struct gl_framebuffer *fb)
{
   return fb->_Xmin <= 0 &&
          fb->_Ymin <= 0 &&
          fb->_Xmax >= fb->Width &&
          fb->_Ymax >= fb->Height;
}

/**
 * Implements fast depth clears on gen6+.
 *
 * Fast clears basically work by setting a flag in each of the subspans
 * represented in the HiZ buffer that says "When you need the depth values for
 * this subspan, it's the hardware's current clear value." Then later rendering
 * can just use the static clear value instead of referencing memory.
 *
 * The tricky part of the implementation is that you have to have the clear
 * value that was used on the depth buffer in place for all further rendering,
 * at least until a resolve to the real depth buffer happens.
 */
static bool
brw_fast_clear_depth(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *depth_irb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_mipmap_tree *mt = depth_irb->mt;
   struct gl_renderbuffer_attachment *depth_att = &fb->Attachment[BUFFER_DEPTH];
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   bool same_clear_value = true;

   if (devinfo->gen < 6)
      return false;

   if (!intel_renderbuffer_has_hiz(depth_irb))
      return false;

   /* We only handle full buffer clears -- otherwise you'd have to track whether
    * a previous clear had happened at a different clear value and resolve it
    * first.
    */
   if ((ctx->Scissor.EnableFlags & 1) && !noop_scissor(fb)) {
      perf_debug("Failed to fast clear %dx%d depth because of scissors. "
                 "Possible 5%% performance win if avoided.\n",
                 mt->surf.logical_level0_px.width,
                 mt->surf.logical_level0_px.height);
      return false;
   }

   switch (mt->format) {
   case MESA_FORMAT_Z32_FLOAT_S8X24_UINT:
   case MESA_FORMAT_Z24_UNORM_S8_UINT:
      /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
       *
       *     "[DevSNB+]: Several cases exist where Depth Buffer Clear cannot be
       *      enabled (the legacy method of clearing must be performed):
       *
       *      - If the depth buffer format is D32_FLOAT_S8X24_UINT or
       *        D24_UNORM_S8_UINT.
       */
      return false;

   case MESA_FORMAT_Z_UNORM16:
      /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
       *
       *     "[DevSNB+]: Several cases exist where Depth Buffer Clear cannot be
       *      enabled (the legacy method of clearing must be performed):
       *
       *      - [DevSNB{W/A}]: When depth buffer format is D16_UNORM and the
       *        width of the map (LOD0) is not a multiple of 16, fast clear
       *        optimization must be disabled.
       */
      if (devinfo->gen == 6 &&
          (minify(mt->surf.phys_level0_sa.width,
                  depth_irb->mt_level - mt->first_level) % 16) != 0)
         return false;
      break;

   default:
      break;
   }

   /* Quantize the clear value to what can be stored in the actual depth
    * buffer. This makes the following check more accurate because it now
    * checks if the actual depth bits will match. It also prevents us from
    * getting a too-accurate depth value during depth testing or when sampling
    * with HiZ enabled.
    */
   float clear_value =
      mt->format == MESA_FORMAT_Z_FLOAT32 ? ctx->Depth.Clear :
      (unsigned)(ctx->Depth.Clear * fb->_DepthMax) / (float)fb->_DepthMax;
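
   /* For example, with a 24-bit depth buffer (_DepthMax == 0xffffff) a
    * requested clear of 0.5 becomes (unsigned)(0.5 * 16777215) / 16777215.0f
    * == 8388607 / 16777215.0f ~= 0.49999997, which is the value the depth
    * buffer can actually hold. MESA_FORMAT_Z_FLOAT32 values pass through
    * unmodified.
    */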

   const uint32_t num_layers = depth_att->Layered ? depth_irb->layer_count : 1;

   /* If we're clearing to a new clear value, then we need to resolve any clear
    * flags out of the HiZ buffer into the real depth buffer.
    */
   if (mt->fast_clear_color.f32[0] != clear_value) {
      for (uint32_t level = mt->first_level; level <= mt->last_level; level++) {
         if (!intel_miptree_level_has_hiz(mt, level))
            continue;

         const unsigned level_layers = brw_get_num_logical_layers(mt, level);

         for (uint32_t layer = 0; layer < level_layers; layer++) {
            if (level == depth_irb->mt_level &&
                layer >= depth_irb->mt_layer &&
                layer < depth_irb->mt_layer + num_layers) {
               /* We're going to clear this layer anyway. Leave it alone. */
               continue;
            }

            enum isl_aux_state aux_state =
               intel_miptree_get_aux_state(mt, level, layer);

            if (aux_state != ISL_AUX_STATE_CLEAR &&
                aux_state != ISL_AUX_STATE_COMPRESSED_CLEAR) {
               /* This slice doesn't have any fast-cleared bits. */
               continue;
            }

            /* If we got here, then the level may have fast-clear bits that
             * use the old clear value. We need to do a depth resolve to get
             * rid of their use of the clear value before we can change it.
             * Fortunately, few applications ever change their depth clear
             * value so this shouldn't happen often.
             */
            intel_hiz_exec(brw, mt, level, layer, 1,
                           ISL_AUX_OP_FULL_RESOLVE);
            intel_miptree_set_aux_state(brw, mt, level, layer, 1,
                                        ISL_AUX_STATE_RESOLVED);
         }
      }

      intel_miptree_set_depth_clear_value(brw, mt, clear_value);
      same_clear_value = false;
   }

   bool need_clear = false;
   for (unsigned a = 0; a < num_layers; a++) {
      enum isl_aux_state aux_state =
         intel_miptree_get_aux_state(mt, depth_irb->mt_level,
                                     depth_irb->mt_layer + a);

      if (aux_state != ISL_AUX_STATE_CLEAR) {
         need_clear = true;
         break;
      }
   }

   if (!need_clear) {
      /* If all of the layers we intend to clear are already in the clear
       * state then simply updating the miptree fast clear value is sufficient
       * to change their clear value.
       */
      if (devinfo->gen >= 10 && !same_clear_value) {
         /* Before gen10, it was enough to just update the clear value in the
          * miptree. But on gen10+, we let blorp update the clear value state
          * buffer when doing a fast clear. Since we are skipping the fast
          * clear here, we need to update the clear color ourselves.
          */
         uint32_t clear_offset = mt->aux_buf->clear_color_offset;
         union isl_color_value clear_color = { .f32 = { clear_value, } };

         /* We can't update the clear color while the hardware is still using
          * the previous one for a resolve or sampling from it. So make sure
          * that there are no pending commands at this point.
          */
         brw_emit_pipe_control_flush(brw, PIPE_CONTROL_CS_STALL);
         for (int i = 0; i < 4; i++) {
            brw_store_data_imm32(brw, mt->aux_buf->clear_color_bo,
                                 clear_offset + i * 4, clear_color.u32[i]);
         }
         brw_emit_pipe_control_flush(brw, PIPE_CONTROL_STATE_CACHE_INVALIDATE);
      }
      return true;
   }

   for (unsigned a = 0; a < num_layers; a++) {
      enum isl_aux_state aux_state =
         intel_miptree_get_aux_state(mt, depth_irb->mt_level,
                                     depth_irb->mt_layer + a);

      if (aux_state != ISL_AUX_STATE_CLEAR) {
         intel_hiz_exec(brw, mt, depth_irb->mt_level,
                        depth_irb->mt_layer + a, 1,
                        ISL_AUX_OP_FAST_CLEAR);
      }
   }

   /* Now, the HiZ buffer contains data that needs to be resolved to the depth
    * buffer.
    */
   intel_miptree_set_aux_state(brw, mt, depth_irb->mt_level,
                               depth_irb->mt_layer, num_layers,
                               ISL_AUX_STATE_CLEAR);

   return true;
}

/**
 * Called by ctx->Driver.Clear.
 */
static void
brw_clear(struct gl_context *ctx, GLbitfield mask)
{
   struct brw_context *brw = brw_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   bool partial_clear = ctx->Scissor.EnableFlags && !noop_scissor(fb);

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
      brw->front_buffer_dirty = true;
   }

   intel_prepare_render(brw);
   brw_workaround_depthstencil_alignment(brw, partial_clear ? 0 : mask);

   if (mask & BUFFER_BIT_DEPTH) {
      if (brw_fast_clear_depth(ctx)) {
         DBG("fast clear: depth\n");
         mask &= ~BUFFER_BIT_DEPTH;
      }
   }

   if (mask & BUFFER_BIT_STENCIL) {
      struct intel_renderbuffer *stencil_irb =
         intel_get_renderbuffer(fb, BUFFER_STENCIL);
      struct intel_mipmap_tree *mt = stencil_irb->mt;
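      /* If this miptree carries separate stencil, its R8 shadow copy (used
       * for stencil texturing) becomes stale once the stencil data is
       * cleared, so flag it for an update before the next stencil-texturing
       * draw.
       */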
      if (mt && mt->stencil_mt)
         mt->stencil_mt->r8stencil_needs_update = true;
   }

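   /* Color clears go through BLORP, which can use a fast clear when the
    * renderbuffer's auxiliary surface (CCS/MCS) allows it and falls back to
    * a regular clear otherwise.
    */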
   if (mask & BUFFER_BITS_COLOR) {
      brw_blorp_clear_color(brw, fb, mask, partial_clear,
                            ctx->Color.sRGBEnabled);
      debug_mask("blorp color", mask & BUFFER_BITS_COLOR);
      mask &= ~BUFFER_BITS_COLOR;
   }

   if (devinfo->gen >= 6 && (mask & BUFFER_BITS_DEPTH_STENCIL)) {
      brw_blorp_clear_depth_stencil(brw, fb, mask, partial_clear);
      debug_mask("blorp depth/stencil", mask & BUFFER_BITS_DEPTH_STENCIL);
      mask &= ~BUFFER_BITS_DEPTH_STENCIL;
   }

   GLbitfield tri_mask = mask & (BUFFER_BIT_STENCIL |
                                 BUFFER_BIT_DEPTH);

   if (tri_mask) {
      debug_mask("tri", tri_mask);
      mask &= ~tri_mask;
      _mesa_meta_glsl_Clear(&brw->ctx, tri_mask);
   }

   /* Any strange buffers get passed off to swrast. The only thing that
    * should be left at this point is the accumulation buffer.
    */
   assert((mask & ~BUFFER_BIT_ACCUM) == 0);
   if (mask) {
      debug_mask("swrast", mask);
      _swrast_Clear(ctx, mask);
   }
}


/**
 * Plug our clear function into the driver function table.
 */
void
intelInitClearFuncs(struct dd_function_table *functions)
{
   functions->Clear = brw_clear;
}