i965/gen8+: Skip depth stalls on state change
[mesa.git] / src / mesa / drivers / dri / i965 / brw_pipe_control.c
1 /*
2 * Copyright © 2010 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "brw_context.h"
25 #include "intel_batchbuffer.h"
26 #include "intel_fbo.h"
27 #include "intel_reg.h"
28
29 /**
30 * According to the latest documentation, any PIPE_CONTROL with the
31 * "Command Streamer Stall" bit set must also have another bit set,
32 * with five different options:
33 *
34 * - Render Target Cache Flush
35 * - Depth Cache Flush
36 * - Stall at Pixel Scoreboard
37 * - Post-Sync Operation
38 * - Depth Stall
39 *
40 * I chose "Stall at Pixel Scoreboard" since we've used it effectively
41 * in the past, but the choice is fairly arbitrary.
42 */
43 static void
44 gen8_add_cs_stall_workaround_bits(uint32_t *flags)
45 {
46 uint32_t wa_bits = PIPE_CONTROL_RENDER_TARGET_FLUSH |
47 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
48 PIPE_CONTROL_WRITE_IMMEDIATE |
49 PIPE_CONTROL_WRITE_DEPTH_COUNT |
50 PIPE_CONTROL_WRITE_TIMESTAMP |
51 PIPE_CONTROL_STALL_AT_SCOREBOARD |
52 PIPE_CONTROL_DEPTH_STALL;
53
54 /* If we're doing a CS stall, and don't already have one of the
55 * workaround bits set, add "Stall at Pixel Scoreboard."
56 */
57 if ((*flags & PIPE_CONTROL_CS_STALL) != 0 && (*flags & wa_bits) == 0)
58 *flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
59 }
60
61 /* Implement the WaCsStallAtEveryFourthPipecontrol workaround on IVB, BYT:
62 *
63 * "Every 4th PIPE_CONTROL command, not counting the PIPE_CONTROL with
64 * only read-cache-invalidate bit(s) set, must have a CS_STALL bit set."
65 *
66 * Note that the kernel does CS stalls between batches, so we only need
67 * to count them within a batch.
68 */
69 static uint32_t
70 gen7_cs_stall_every_four_pipe_controls(struct brw_context *brw, uint32_t flags)
71 {
72 if (brw->gen == 7 && !brw->is_haswell) {
73 if (flags & PIPE_CONTROL_CS_STALL) {
74 /* If we're doing a CS stall, reset the counter and carry on. */
75 brw->pipe_controls_since_last_cs_stall = 0;
76 return 0;
77 }
78
79 /* If this is the fourth pipe control without a CS stall, do one now. */
80 if (++brw->pipe_controls_since_last_cs_stall == 4) {
81 brw->pipe_controls_since_last_cs_stall = 0;
82 return PIPE_CONTROL_CS_STALL;
83 }
84 }
85 return 0;
86 }
87
/**
 * Emit a PIPE_CONTROL with various flushing flags.
 *
 * The caller is responsible for deciding what flags are appropriate for the
 * given generation.  No post-sync write target is supplied here, so the
 * address/immediate dwords are emitted as zero.
 */
void
brw_emit_pipe_control_flush(struct brw_context *brw, uint32_t flags)
{
   if (brw->gen >= 8) {
      /* Gen8+: a CS stall must be accompanied by a qualifying bit. */
      gen8_add_cs_stall_workaround_bits(&flags);

      /* Gen8+ PIPE_CONTROL is 6 dwords: header, flags, 64-bit address,
       * and 64-bit immediate data -- all zero since we write nothing.
       */
      BEGIN_BATCH(6);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (6 - 2));
      OUT_BATCH(flags);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else if (brw->gen >= 6) {
      /* WaCsStallAtEveryFourthPipecontrol: no-op except on IVB/BYT. */
      flags |= gen7_cs_stall_every_four_pipe_controls(brw, flags);

      /* Gen6-7 PIPE_CONTROL is 5 dwords: header, flags, 32-bit address,
       * and 64-bit immediate data.
       */
      BEGIN_BATCH(5);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (5 - 2));
      OUT_BATCH(flags);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      /* Pre-gen6: the flag bits live in the command header dword itself. */
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | flags | (4 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}
127
/**
 * Emit a PIPE_CONTROL that writes to a buffer object.
 *
 * \p flags should contain one of the following items:
 *  - PIPE_CONTROL_WRITE_IMMEDIATE
 *  - PIPE_CONTROL_WRITE_TIMESTAMP
 *  - PIPE_CONTROL_WRITE_DEPTH_COUNT
 *
 * The post-sync operation writes 64 bits (\p imm_lower / \p imm_upper for
 * an immediate write) to \p bo at \p offset.
 */
void
brw_emit_pipe_control_write(struct brw_context *brw, uint32_t flags,
                            drm_intel_bo *bo, uint32_t offset,
                            uint32_t imm_lower, uint32_t imm_upper)
{
   if (brw->gen >= 8) {
      /* Gen8+: a CS stall must be accompanied by a qualifying bit. */
      gen8_add_cs_stall_workaround_bits(&flags);

      BEGIN_BATCH(6);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (6 - 2));
      OUT_BATCH(flags);
      /* 64-bit target address (two dwords). */
      OUT_RELOC64(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                  offset);
      OUT_BATCH(imm_lower);
      OUT_BATCH(imm_upper);
      ADVANCE_BATCH();
   } else if (brw->gen >= 6) {
      /* WaCsStallAtEveryFourthPipecontrol: no-op except on IVB/BYT. */
      flags |= gen7_cs_stall_every_four_pipe_controls(brw, flags);

      /* PPGTT/GGTT is selected by DW2 bit 2 on Sandybridge, but DW1 bit 24
       * on later platforms.  We always use PPGTT on Gen7+.
       */
      unsigned gen6_gtt = brw->gen == 6 ? PIPE_CONTROL_GLOBAL_GTT_WRITE : 0;

      BEGIN_BATCH(5);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (5 - 2));
      OUT_BATCH(flags);
      OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                gen6_gtt | offset);
      OUT_BATCH(imm_lower);
      OUT_BATCH(imm_upper);
      ADVANCE_BATCH();
   } else {
      /* Pre-gen6: flags live in the header; writes always go through the
       * global GTT.
       */
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | flags | (4 - 2));
      OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                PIPE_CONTROL_GLOBAL_GTT_WRITE | offset);
      OUT_BATCH(imm_lower);
      OUT_BATCH(imm_upper);
      ADVANCE_BATCH();
   }
}
178
179 /**
180 * Restriction [DevSNB, DevIVB]:
181 *
182 * Prior to changing Depth/Stencil Buffer state (i.e. any combination of
183 * 3DSTATE_DEPTH_BUFFER, 3DSTATE_CLEAR_PARAMS, 3DSTATE_STENCIL_BUFFER,
184 * 3DSTATE_HIER_DEPTH_BUFFER) SW must first issue a pipelined depth stall
185 * (PIPE_CONTROL with Depth Stall bit set), followed by a pipelined depth
186 * cache flush (PIPE_CONTROL with Depth Flush Bit set), followed by
187 * another pipelined depth stall (PIPE_CONTROL with Depth Stall bit set),
188 * unless SW can otherwise guarantee that the pipeline from WM onwards is
189 * already flushed (e.g., via a preceding MI_FLUSH).
190 */
191 void
192 brw_emit_depth_stall_flushes(struct brw_context *brw)
193 {
194 assert(brw->gen >= 6 && brw->gen <= 9);
195
196 /* Starting on BDW, these pipe controls are unnecessary.
197 *
198 * WM HW will internally manage the draining pipe and flushing of the caches
199 * when this command is issued. The PIPE_CONTROL restrictions are removed.
200 */
201 if (brw->gen >= 8)
202 return;
203
204 brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_STALL);
205 brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_CACHE_FLUSH);
206 brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_STALL);
207 }
208
209 /**
210 * From the Ivybridge PRM, Volume 2 Part 1, Section 3.2 (VS Stage Input):
211 * "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
212 * stall needs to be sent just prior to any 3DSTATE_VS, 3DSTATE_URB_VS,
213 * 3DSTATE_CONSTANT_VS, 3DSTATE_BINDING_TABLE_POINTER_VS,
214 * 3DSTATE_SAMPLER_STATE_POINTER_VS command. Only one PIPE_CONTROL needs
215 * to be sent before any combination of VS associated 3DSTATE."
216 */
217 void
218 gen7_emit_vs_workaround_flush(struct brw_context *brw)
219 {
220 assert(brw->gen == 7);
221 brw_emit_pipe_control_write(brw,
222 PIPE_CONTROL_WRITE_IMMEDIATE
223 | PIPE_CONTROL_DEPTH_STALL,
224 brw->workaround_bo, 0,
225 0, 0);
226 }
227
228
229 /**
230 * Emit a PIPE_CONTROL command for gen7 with the CS Stall bit set.
231 */
232 void
233 gen7_emit_cs_stall_flush(struct brw_context *brw)
234 {
235 brw_emit_pipe_control_write(brw,
236 PIPE_CONTROL_CS_STALL
237 | PIPE_CONTROL_WRITE_IMMEDIATE,
238 brw->workaround_bo, 0,
239 0, 0);
240 }
241
242
243 /**
244 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
245 * implementing two workarounds on gen6. From section 1.4.7.1
246 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
247 *
248 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
249 * produced by non-pipelined state commands), software needs to first
250 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
251 * 0.
252 *
253 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
254 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
255 *
256 * And the workaround for these two requires this workaround first:
257 *
258 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
259 * BEFORE the pipe-control with a post-sync op and no write-cache
260 * flushes.
261 *
262 * And this last workaround is tricky because of the requirements on
263 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
264 * volume 2 part 1:
265 *
266 * "1 of the following must also be set:
267 * - Render Target Cache Flush Enable ([12] of DW1)
268 * - Depth Cache Flush Enable ([0] of DW1)
269 * - Stall at Pixel Scoreboard ([1] of DW1)
270 * - Depth Stall ([13] of DW1)
271 * - Post-Sync Operation ([13] of DW1)
272 * - Notify Enable ([8] of DW1)"
273 *
274 * The cache flushes require the workaround flush that triggered this
275 * one, so we can't use it. Depth stall would trigger the same.
276 * Post-sync nonzero is what triggered this second workaround, so we
277 * can't use that one either. Notify enable is IRQs, which aren't
278 * really our business. That leaves only stall at scoreboard.
279 */
280 void
281 brw_emit_post_sync_nonzero_flush(struct brw_context *brw)
282 {
283 brw_emit_pipe_control_flush(brw,
284 PIPE_CONTROL_CS_STALL |
285 PIPE_CONTROL_STALL_AT_SCOREBOARD);
286
287 brw_emit_pipe_control_write(brw, PIPE_CONTROL_WRITE_IMMEDIATE,
288 brw->workaround_bo, 0, 0, 0);
289 }
290
291 /* Emit a pipelined flush to either flush render and texture cache for
292 * reading from a FBO-drawn texture, or flush so that frontbuffer
293 * render appears on the screen in DRI1.
294 *
295 * This is also used for the always_flush_cache driconf debug option.
296 */
297 void
298 brw_emit_mi_flush(struct brw_context *brw)
299 {
300 if (brw->batch.ring == BLT_RING && brw->gen >= 6) {
301 BEGIN_BATCH_BLT(4);
302 OUT_BATCH(MI_FLUSH_DW);
303 OUT_BATCH(0);
304 OUT_BATCH(0);
305 OUT_BATCH(0);
306 ADVANCE_BATCH();
307 } else {
308 int flags = PIPE_CONTROL_NO_WRITE | PIPE_CONTROL_RENDER_TARGET_FLUSH;
309 if (brw->gen >= 6) {
310 if (brw->gen == 9) {
311 /* Hardware workaround: SKL
312 *
313 * Emit Pipe Control with all bits set to zero before emitting
314 * a Pipe Control with VF Cache Invalidate set.
315 */
316 brw_emit_pipe_control_flush(brw, 0);
317 }
318
319 flags |= PIPE_CONTROL_INSTRUCTION_INVALIDATE |
320 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
321 PIPE_CONTROL_VF_CACHE_INVALIDATE |
322 PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
323 PIPE_CONTROL_CS_STALL;
324
325 if (brw->gen == 6) {
326 /* Hardware workaround: SNB B-Spec says:
327 *
328 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache
329 * Flush Enable =1, a PIPE_CONTROL with any non-zero
330 * post-sync-op is required.
331 */
332 brw_emit_post_sync_nonzero_flush(brw);
333 }
334 }
335 brw_emit_pipe_control_flush(brw, flags);
336 }
337
338 brw_render_cache_set_clear(brw);
339 }
340
341 int
342 brw_init_pipe_control(struct brw_context *brw,
343 const struct brw_device_info *devinfo)
344 {
345 if (devinfo->gen < 6)
346 return 0;
347
348 /* We can't just use brw_state_batch to get a chunk of space for
349 * the gen6 workaround because it involves actually writing to
350 * the buffer, and the kernel doesn't let us write to the batch.
351 */
352 brw->workaround_bo = drm_intel_bo_alloc(brw->bufmgr,
353 "pipe_control workaround",
354 4096, 4096);
355 if (brw->workaround_bo == NULL)
356 return -ENOMEM;
357
358 brw->pipe_controls_since_last_cs_stall = 0;
359
360 return 0;
361 }
362
363 void
364 brw_fini_pipe_control(struct brw_context *brw)
365 {
366 drm_intel_bo_unreference(brw->workaround_bo);
367 }