i965: Pass brw_context to functions rather than intel_context.
[mesa.git] / src/mesa/drivers/dri/i965/brw_clear.c
/**************************************************************************
 *
 * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
 * Copyright 2009, 2012 Intel Corporation.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "main/glheader.h"
#include "main/mtypes.h"
#include "main/condrender.h"
#include "swrast/swrast.h"
#include "drivers/common/meta.h"

#include "intel_batchbuffer.h"
#include "intel_blit.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"

#include "brw_context.h"
#include "brw_blorp.h"

#define FILE_DEBUG_FLAG DEBUG_BLIT

static const char *buffer_names[] = {
   [BUFFER_FRONT_LEFT] = "front",
   [BUFFER_BACK_LEFT] = "back",
   [BUFFER_FRONT_RIGHT] = "front right",
   [BUFFER_BACK_RIGHT] = "back right",
   [BUFFER_DEPTH] = "depth",
   [BUFFER_STENCIL] = "stencil",
   [BUFFER_ACCUM] = "accum",
   [BUFFER_AUX0] = "aux0",
   [BUFFER_COLOR0] = "color0",
   [BUFFER_COLOR1] = "color1",
   [BUFFER_COLOR2] = "color2",
   [BUFFER_COLOR3] = "color3",
   [BUFFER_COLOR4] = "color4",
   [BUFFER_COLOR5] = "color5",
   [BUFFER_COLOR6] = "color6",
   [BUFFER_COLOR7] = "color7",
};

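/**
 * Prints the set of buffers selected by \p mask, under the given label, when
 * the DEBUG_BLIT debug flag is set.
 */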
static void
debug_mask(const char *name, GLbitfield mask)
{
   GLuint i;

   if (unlikely(INTEL_DEBUG & DEBUG_BLIT)) {
      DBG("%s clear:", name);
      for (i = 0; i < BUFFER_COUNT; i++) {
         if (mask & (1 << i))
            DBG(" %s", buffer_names[i]);
      }
      DBG("\n");
   }
}

/**
 * Returns true if the scissor is a noop (cuts out nothing).
 */
static bool
noop_scissor(struct gl_context *ctx, struct gl_framebuffer *fb)
{
   return ctx->Scissor.X <= 0 &&
          ctx->Scissor.Y <= 0 &&
          ctx->Scissor.Width >= fb->Width &&
          ctx->Scissor.Height >= fb->Height;
}

/**
 * Implements fast depth clears on gen6+.
 *
 * Fast clears basically work by setting a flag in each of the subspans
 * represented in the HiZ buffer that says "When you need the depth values for
 * this subspan, it's the hardware's current clear value."  Then later
 * rendering can just use the static clear value instead of referencing memory.
 *
 * The tricky part of the implementation is that you have to have the clear
 * value that was used on the depth buffer in place for all further rendering,
 * at least until a resolve to the real depth buffer happens.
 */
static bool
brw_fast_clear_depth(struct gl_context *ctx)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = intel_context(ctx);
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *depth_irb =
      intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_mipmap_tree *mt = depth_irb->mt;

   if (intel->gen < 6)
      return false;

   if (!intel_renderbuffer_has_hiz(depth_irb))
      return false;

   /* We only handle full buffer clears -- otherwise you'd have to track
    * whether a previous clear had happened at a different clear value and
    * resolve it first.
    */
   if (ctx->Scissor.Enabled && !noop_scissor(ctx, fb)) {
      perf_debug("Failed to fast clear depth due to scissor being enabled. "
                 "Possible 5%% performance win if avoided.\n");
      return false;
   }

   uint32_t depth_clear_value;
   switch (mt->format) {
   case MESA_FORMAT_Z32_FLOAT_X24S8:
   case MESA_FORMAT_S8_Z24:
      /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
       *
       *     "[DevSNB+]: Several cases exist where Depth Buffer Clear cannot be
       *      enabled (the legacy method of clearing must be performed):
       *
       *      - If the depth buffer format is D32_FLOAT_S8X24_UINT or
       *        D24_UNORM_S8_UINT.
       */
      return false;

   case MESA_FORMAT_Z32_FLOAT:
      depth_clear_value = float_as_int(ctx->Depth.Clear);
      break;

   case MESA_FORMAT_Z16:
      /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
       *
       *     "[DevSNB+]: Several cases exist where Depth Buffer Clear cannot be
       *      enabled (the legacy method of clearing must be performed):
       *
       *      - DevSNB{W/A}]: When depth buffer format is D16_UNORM and the
       *        width of the map (LOD0) is not multiple of 16, fast clear
       *        optimization must be disabled.
       */
      if (intel->gen == 6 && (mt->level[depth_irb->mt_level].width % 16) != 0)
         return false;
      /* FALLTHROUGH */

   default:
      depth_clear_value = fb->_DepthMax * ctx->Depth.Clear;
      break;
   }

   /* If we're clearing to a new clear value, then we need to resolve any
    * clear flags out of the HiZ buffer into the real depth buffer.
    */
   if (mt->depth_clear_value != depth_clear_value) {
      intel_miptree_all_slices_resolve_depth(brw, mt);
      mt->depth_clear_value = depth_clear_value;
   }

   /* From the Sandy Bridge PRM, volume 2 part 1, page 313:
    *
    *     "If other rendering operations have preceded this clear, a
    *      PIPE_CONTROL with write cache flush enabled and Z-inhibit disabled
    *      must be issued before the rectangle primitive used for the depth
    *      buffer clear operation.
    */
   intel_batchbuffer_emit_mi_flush(brw);

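   /* Emit the rectangle primitive that performs the actual HiZ fast clear of
    * this level/layer.
    */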
   intel_hiz_exec(brw, mt, depth_irb->mt_level, depth_irb->mt_layer,
                  GEN6_HIZ_OP_DEPTH_CLEAR);

   if (intel->gen == 6) {
      /* From the Sandy Bridge PRM, volume 2 part 1, page 314:
       *
       *     "DevSNB, DevSNB-B{W/A}]: Depth buffer clear pass must be followed
       *      by a PIPE_CONTROL command with DEPTH_STALL bit set and Then
       *      followed by Depth FLUSH'
       */
      intel_batchbuffer_emit_mi_flush(brw);
   }

   /* Now, the HiZ buffer contains data that needs to be resolved to the depth
    * buffer.
    */
   intel_renderbuffer_set_needs_depth_resolve(depth_irb);

   return true;
}

/**
 * Called by ctx->Driver.Clear.
 */
static void
brw_clear(struct gl_context *ctx, GLbitfield mask)
{
   struct brw_context *brw = brw_context(ctx);
   struct intel_context *intel = &brw->intel;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   bool partial_clear = ctx->Scissor.Enabled && !noop_scissor(ctx, fb);

   if (!_mesa_check_conditional_render(ctx))
      return;

   if (mask & (BUFFER_BIT_FRONT_LEFT | BUFFER_BIT_FRONT_RIGHT)) {
      intel->front_buffer_dirty = true;
   }

   intel_prepare_render(brw);
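   /* Tell the depth/stencil alignment workaround which buffers are being
    * fully cleared, since their old contents need not be preserved; a
    * scissored (partial) clear gets no such exemption.
    */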
   brw_workaround_depthstencil_alignment(brw, partial_clear ? 0 : mask);

   if (mask & BUFFER_BIT_DEPTH) {
      if (brw_fast_clear_depth(ctx)) {
         DBG("fast clear: depth\n");
         mask &= ~BUFFER_BIT_DEPTH;
      }
   }

   /* BLORP is currently only supported on Gen6+. */
   if (intel->gen >= 6) {
      if (mask & BUFFER_BITS_COLOR) {
         if (brw_blorp_clear_color(brw, fb, partial_clear)) {
            debug_mask("blorp color", mask & BUFFER_BITS_COLOR);
            mask &= ~BUFFER_BITS_COLOR;
         }
      }
   }

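   /* Clear whatever remains of the color, depth, and stencil buffers with a
    * meta operation, which draws a window-sized quad into each of them.
    */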
   GLbitfield tri_mask = mask & (BUFFER_BITS_COLOR |
                                 BUFFER_BIT_STENCIL |
                                 BUFFER_BIT_DEPTH);

   if (tri_mask) {
      debug_mask("tri", tri_mask);
      mask &= ~tri_mask;

      if (ctx->API == API_OPENGLES) {
         _mesa_meta_Clear(&intel->ctx, tri_mask);
      } else {
         _mesa_meta_glsl_Clear(&intel->ctx, tri_mask);
      }
   }

   /* Any strange buffers get passed off to swrast */
   if (mask) {
      debug_mask("swrast", mask);
      _swrast_Clear(ctx, mask);
   }
}

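/**
 * Plugs brw_clear() into the device driver function table.  This is expected
 * to be called while the driver fills in its dd_function_table during context
 * creation, so that ctx->Driver.Clear dispatches to brw_clear() above.
 */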
void
intelInitClearFuncs(struct dd_function_table *functions)
{
   functions->Clear = brw_clear;
}