i965: Transplant PIPE_CONTROL routines to brw_pipe_control
[mesa.git] src/mesa/drivers/dri/i965/brw_pipe_control.c
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_context.h"
#include "intel_batchbuffer.h"
#include "intel_fbo.h"
#include "intel_reg.h"

/**
 * According to the latest documentation, any PIPE_CONTROL with the
 * "Command Streamer Stall" bit set must also have another bit set,
 * with five different options:
 *
 *  - Render Target Cache Flush
 *  - Depth Cache Flush
 *  - Stall at Pixel Scoreboard
 *  - Post-Sync Operation
 *  - Depth Stall
 *
 * I chose "Stall at Pixel Scoreboard" since we've used it effectively
 * in the past, but the choice is fairly arbitrary.
 */
static void
gen8_add_cs_stall_workaround_bits(uint32_t *flags)
{
   uint32_t wa_bits = PIPE_CONTROL_RENDER_TARGET_FLUSH |
                      PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                      PIPE_CONTROL_WRITE_IMMEDIATE |
                      PIPE_CONTROL_WRITE_DEPTH_COUNT |
                      PIPE_CONTROL_WRITE_TIMESTAMP |
                      PIPE_CONTROL_STALL_AT_SCOREBOARD |
                      PIPE_CONTROL_DEPTH_STALL;

   /* If we're doing a CS stall, and don't already have one of the
    * workaround bits set, add "Stall at Pixel Scoreboard."
    */
   if ((*flags & PIPE_CONTROL_CS_STALL) != 0 && (*flags & wa_bits) == 0)
      *flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
}
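
/* Illustrative example (not part of the original code): a caller that passes
 * only PIPE_CONTROL_CS_STALL ends up emitting
 *
 *    PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD
 *
 * while flags that already contain one of the workaround bits, such as
 * PIPE_CONTROL_DEPTH_CACHE_FLUSH, are left untouched.
 */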

/* Implement the WaCsStallAtEveryFourthPipecontrol workaround on IVB, BYT:
 *
 * "Every 4th PIPE_CONTROL command, not counting the PIPE_CONTROL with
 *  only read-cache-invalidate bit(s) set, must have a CS_STALL bit set."
 *
 * Note that the kernel does CS stalls between batches, so we only need
 * to count them within a batch.
 */
static uint32_t
gen7_cs_stall_every_four_pipe_controls(struct brw_context *brw, uint32_t flags)
{
   if (brw->gen == 7 && !brw->is_haswell) {
      if (flags & PIPE_CONTROL_CS_STALL) {
         /* If we're doing a CS stall, reset the counter and carry on. */
         brw->batch.pipe_controls_since_last_cs_stall = 0;
         return 0;
      }

      /* If this is the fourth pipe control without a CS stall, do one now. */
      if (++brw->batch.pipe_controls_since_last_cs_stall == 4) {
         brw->batch.pipe_controls_since_last_cs_stall = 0;
         return PIPE_CONTROL_CS_STALL;
      }
   }
   return 0;
}
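
/* Worked example (illustrative): within a single batch on IVB/BYT, the first
 * three PIPE_CONTROLs emitted without CS_STALL pass through unchanged; the
 * fourth gets PIPE_CONTROL_CS_STALL OR'ed in and the counter resets.  Any
 * PIPE_CONTROL that already stalls the command streamer also resets it.
 */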

/**
 * Emit a PIPE_CONTROL with various flushing flags.
 *
 * The caller is responsible for deciding what flags are appropriate for the
 * given generation.
 */
void
brw_emit_pipe_control_flush(struct brw_context *brw, uint32_t flags)
{
   if (brw->gen >= 8) {
      gen8_add_cs_stall_workaround_bits(&flags);

      BEGIN_BATCH(6);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (6 - 2));
      OUT_BATCH(flags);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else if (brw->gen >= 6) {
      flags |= gen7_cs_stall_every_four_pipe_controls(brw, flags);

      BEGIN_BATCH(5);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (5 - 2));
      OUT_BATCH(flags);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | flags | (4 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}
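
/* Usage sketch (illustrative, not taken from this file): a caller that wants
 * to flush the render target cache and invalidate the texture cache before
 * sampling from a just-rendered FBO might do
 *
 *    brw_emit_pipe_control_flush(brw,
 *                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
 *                                PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 *                                PIPE_CONTROL_CS_STALL);
 *
 * The caller chooses the flags; this helper only adds the per-generation
 * workaround bits.
 */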

/**
 * Emit a PIPE_CONTROL that writes to a buffer object.
 *
 * \p flags should contain one of the following items:
 *  - PIPE_CONTROL_WRITE_IMMEDIATE
 *  - PIPE_CONTROL_WRITE_TIMESTAMP
 *  - PIPE_CONTROL_WRITE_DEPTH_COUNT
 */
void
brw_emit_pipe_control_write(struct brw_context *brw, uint32_t flags,
                            drm_intel_bo *bo, uint32_t offset,
                            uint32_t imm_lower, uint32_t imm_upper)
{
   if (brw->gen >= 8) {
      gen8_add_cs_stall_workaround_bits(&flags);

      BEGIN_BATCH(6);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (6 - 2));
      OUT_BATCH(flags);
      OUT_RELOC64(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                  offset);
      OUT_BATCH(imm_lower);
      OUT_BATCH(imm_upper);
      ADVANCE_BATCH();
   } else if (brw->gen >= 6) {
      flags |= gen7_cs_stall_every_four_pipe_controls(brw, flags);

      /* PPGTT/GGTT is selected by DW2 bit 2 on Sandybridge, but DW1 bit 24
       * on later platforms.  We always use PPGTT on Gen7+.
       */
      unsigned gen6_gtt = brw->gen == 6 ? PIPE_CONTROL_GLOBAL_GTT_WRITE : 0;

      BEGIN_BATCH(5);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | (5 - 2));
      OUT_BATCH(flags);
      OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                gen6_gtt | offset);
      OUT_BATCH(imm_lower);
      OUT_BATCH(imm_upper);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_PIPE_CONTROL | flags | (4 - 2));
      OUT_RELOC(bo, I915_GEM_DOMAIN_INSTRUCTION, I915_GEM_DOMAIN_INSTRUCTION,
                PIPE_CONTROL_GLOBAL_GTT_WRITE | offset);
      OUT_BATCH(imm_lower);
      OUT_BATCH(imm_upper);
      ADVANCE_BATCH();
   }
}
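
/* Usage sketch (illustrative): an occlusion query could capture the current
 * depth count into its buffer object with
 *
 *    brw_emit_pipe_control_write(brw,
 *                                PIPE_CONTROL_WRITE_DEPTH_COUNT |
 *                                PIPE_CONTROL_DEPTH_STALL,
 *                                query->bo, idx * sizeof(uint64_t), 0, 0);
 *
 * where query->bo and idx are hypothetical names.  The two immediate
 * arguments are only meaningful with PIPE_CONTROL_WRITE_IMMEDIATE.
 */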

/**
 * Restriction [DevSNB, DevIVB]:
 *
 * Prior to changing Depth/Stencil Buffer state (i.e. any combination of
 * 3DSTATE_DEPTH_BUFFER, 3DSTATE_CLEAR_PARAMS, 3DSTATE_STENCIL_BUFFER,
 * 3DSTATE_HIER_DEPTH_BUFFER) SW must first issue a pipelined depth stall
 * (PIPE_CONTROL with Depth Stall bit set), followed by a pipelined depth
 * cache flush (PIPE_CONTROL with Depth Flush Bit set), followed by
 * another pipelined depth stall (PIPE_CONTROL with Depth Stall bit set),
 * unless SW can otherwise guarantee that the pipeline from WM onwards is
 * already flushed (e.g., via a preceding MI_FLUSH).
 */
void
intel_emit_depth_stall_flushes(struct brw_context *brw)
{
   assert(brw->gen >= 6 && brw->gen <= 9);

   brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_STALL);
   brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_CACHE_FLUSH);
   brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_STALL);
}
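
/* Usage sketch (illustrative): a caller that is about to emit new
 * depth/stencil state would do
 *
 *    intel_emit_depth_stall_flushes(brw);
 *    ...then emit 3DSTATE_DEPTH_BUFFER, 3DSTATE_STENCIL_BUFFER, etc...
 *
 * unless it can guarantee the pipeline from WM onwards is already flushed.
 */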

/**
 * From the Ivybridge PRM, Volume 2 Part 1, Section 3.2 (VS Stage Input):
 * "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
 *  stall needs to be sent just prior to any 3DSTATE_VS, 3DSTATE_URB_VS,
 *  3DSTATE_CONSTANT_VS, 3DSTATE_BINDING_TABLE_POINTER_VS,
 *  3DSTATE_SAMPLER_STATE_POINTER_VS command.  Only one PIPE_CONTROL needs
 *  to be sent before any combination of VS associated 3DSTATE."
 */
void
gen7_emit_vs_workaround_flush(struct brw_context *brw)
{
   assert(brw->gen == 7);
   brw_emit_pipe_control_write(brw,
                               PIPE_CONTROL_WRITE_IMMEDIATE
                               | PIPE_CONTROL_DEPTH_STALL,
                               brw->batch.workaround_bo, 0,
                               0, 0);
}


/**
 * Emit a PIPE_CONTROL command for gen7 with the CS Stall bit set.
 */
void
gen7_emit_cs_stall_flush(struct brw_context *brw)
{
   brw_emit_pipe_control_write(brw,
                               PIPE_CONTROL_CS_STALL
                               | PIPE_CONTROL_WRITE_IMMEDIATE,
                               brw->batch.workaround_bo, 0,
                               0, 0);
}


/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 *   [DevSNB-C+{W/A}] Before any depth stall flush (including those
 *   produced by non-pipelined state commands), software needs to first
 *   send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 *   0.
 *
 *   [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 *   =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 *   [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 *   BEFORE the pipe-control with a post-sync op and no write-cache
 *   flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
void
intel_emit_post_sync_nonzero_flush(struct brw_context *brw)
{
   brw_emit_pipe_control_flush(brw,
                               PIPE_CONTROL_CS_STALL |
                               PIPE_CONTROL_STALL_AT_SCOREBOARD);

   brw_emit_pipe_control_write(brw, PIPE_CONTROL_WRITE_IMMEDIATE,
                               brw->batch.workaround_bo, 0, 0, 0);
}

/* Emit a pipelined flush to either flush render and texture cache for
 * reading from a FBO-drawn texture, or flush so that frontbuffer
 * render appears on the screen in DRI1.
 *
 * This is also used for the always_flush_cache driconf debug option.
 */
void
intel_batchbuffer_emit_mi_flush(struct brw_context *brw)
{
   if (brw->batch.ring == BLT_RING && brw->gen >= 6) {
      BEGIN_BATCH_BLT(4);
      OUT_BATCH(MI_FLUSH_DW);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      int flags = PIPE_CONTROL_NO_WRITE | PIPE_CONTROL_RENDER_TARGET_FLUSH;
      if (brw->gen >= 6) {
         if (brw->gen == 9) {
            /* Hardware workaround: SKL
             *
             * Emit Pipe Control with all bits set to zero before emitting
             * a Pipe Control with VF Cache Invalidate set.
             */
            brw_emit_pipe_control_flush(brw, 0);
         }

         flags |= PIPE_CONTROL_INSTRUCTION_INVALIDATE |
                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
                  PIPE_CONTROL_VF_CACHE_INVALIDATE |
                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                  PIPE_CONTROL_CS_STALL;

         if (brw->gen == 6) {
            /* Hardware workaround: SNB B-Spec says:
             *
             *   [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache
             *   Flush Enable =1, a PIPE_CONTROL with any non-zero
             *   post-sync-op is required.
             */
            intel_emit_post_sync_nonzero_flush(brw);
         }
      }
      brw_emit_pipe_control_flush(brw, flags);
   }

   brw_render_cache_set_clear(brw);
}