src/mesa/drivers/dri/i965/brw_pipe_control.c
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_state.h"
#include "intel_batchbuffer.h"
#include "intel_fbo.h"

/**
 * Emit a PIPE_CONTROL with various flushing flags.
 *
 * The caller is responsible for deciding what flags are appropriate for the
 * given generation.
 */
void
brw_emit_pipe_control_flush(struct brw_context *brw, uint32_t flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen >= 6 &&
       (flags & PIPE_CONTROL_CACHE_FLUSH_BITS) &&
       (flags & PIPE_CONTROL_CACHE_INVALIDATE_BITS)) {
      /* A pipe control command with flush and invalidate bits set
       * simultaneously is an inherently racy operation on Gen6+ if the
       * contents of the flushed caches were intended to become visible from
       * any of the invalidated caches.  Split it into two PIPE_CONTROLs: the
       * first one should stall the pipeline to make sure that the flushed
       * R/W caches are coherent with memory once the specified R/O caches
       * are invalidated.  On pre-Gen6 hardware the (implicit) R/O cache
       * invalidation seems to happen at the bottom of the pipeline together
       * with any write cache flush, so this shouldn't be a concern.  In
       * order to ensure a full stall, we do an end-of-pipe sync.
       */
      brw_emit_end_of_pipe_sync(brw, (flags & PIPE_CONTROL_CACHE_FLUSH_BITS));
      flags &= ~(PIPE_CONTROL_CACHE_FLUSH_BITS | PIPE_CONTROL_CS_STALL);
   }

   brw->vtbl.emit_raw_pipe_control(brw, flags, NULL, 0, 0);
}
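
/* Illustrative usage sketch (not part of the original file): a caller that
 * has rendered into a texture and now wants to sample from it could pass
 * flush and invalidate bits in a single call; on Gen6+ the helper above
 * splits that into an end-of-pipe sync followed by the invalidation:
 *
 *    brw_emit_pipe_control_flush(brw,
 *                                PIPE_CONTROL_RENDER_TARGET_FLUSH |
 *                                PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
 */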

/**
 * Emit a PIPE_CONTROL that writes to a buffer object.
 *
 * \p flags should contain one of the following items:
 *  - PIPE_CONTROL_WRITE_IMMEDIATE
 *  - PIPE_CONTROL_WRITE_TIMESTAMP
 *  - PIPE_CONTROL_WRITE_DEPTH_COUNT
 */
void
brw_emit_pipe_control_write(struct brw_context *brw, uint32_t flags,
                            struct brw_bo *bo, uint32_t offset,
                            uint64_t imm)
{
   brw->vtbl.emit_raw_pipe_control(brw, flags, bo, offset, imm);
}
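
/* Illustrative sketch: recording a GPU timestamp into a query buffer
 * object, roughly as query-object code would ("query->bo" and "idx" are
 * hypothetical names here):
 *
 *    brw_emit_pipe_control_write(brw, PIPE_CONTROL_WRITE_TIMESTAMP,
 *                                query->bo, idx * sizeof(uint64_t), 0);
 */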

/**
 * Restriction [DevSNB, DevIVB]:
 *
 * Prior to changing Depth/Stencil Buffer state (i.e. any combination of
 * 3DSTATE_DEPTH_BUFFER, 3DSTATE_CLEAR_PARAMS, 3DSTATE_STENCIL_BUFFER,
 * 3DSTATE_HIER_DEPTH_BUFFER) SW must first issue a pipelined depth stall
 * (PIPE_CONTROL with Depth Stall bit set), followed by a pipelined depth
 * cache flush (PIPE_CONTROL with Depth Flush Bit set), followed by
 * another pipelined depth stall (PIPE_CONTROL with Depth Stall bit set),
 * unless SW can otherwise guarantee that the pipeline from WM onwards is
 * already flushed (e.g., via a preceding MI_FLUSH).
 */
void
brw_emit_depth_stall_flushes(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   assert(devinfo->gen >= 6);

   /* Starting with BDW, these PIPE_CONTROLs are unnecessary:
    *
    *    WM HW will internally manage the draining pipe and flushing of the
    *    caches when this command is issued. The PIPE_CONTROL restrictions
    *    are removed.
    */
   if (devinfo->gen >= 8)
      return;

   brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_STALL);
   brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_CACHE_FLUSH);
   brw_emit_pipe_control_flush(brw, PIPE_CONTROL_DEPTH_STALL);
}
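
/* Illustrative sketch: a caller about to reprogram depth/stencil state on
 * Gen6-7 would issue the stall/flush/stall sequence first:
 *
 *    brw_emit_depth_stall_flushes(brw);
 *    ... emit 3DSTATE_DEPTH_BUFFER / 3DSTATE_STENCIL_BUFFER ...
 */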

/**
 * From the Ivybridge PRM, Volume 2 Part 1, Section 3.2 (VS Stage Input):
 *
 *    "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
 *     stall needs to be sent just prior to any 3DSTATE_VS, 3DSTATE_URB_VS,
 *     3DSTATE_CONSTANT_VS, 3DSTATE_BINDING_TABLE_POINTER_VS,
 *     3DSTATE_SAMPLER_STATE_POINTER_VS command.  Only one PIPE_CONTROL
 *     needs to be sent before any combination of VS associated 3DSTATE."
 */
void
gen7_emit_vs_workaround_flush(struct brw_context *brw)
{
   MAYBE_UNUSED const struct gen_device_info *devinfo = &brw->screen->devinfo;

   assert(devinfo->gen == 7);
   brw_emit_pipe_control_write(brw,
                               PIPE_CONTROL_WRITE_IMMEDIATE
                               | PIPE_CONTROL_DEPTH_STALL,
                               brw->workaround_bo, 0, 0);
}
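
/* Illustrative sketch: an Ivybridge-class (gen7, non-Haswell) URB/VS setup
 * path would emit this flush before programming VS state:
 *
 *    if (devinfo->gen == 7 && !devinfo->is_haswell)
 *       gen7_emit_vs_workaround_flush(brw);
 *    ... emit 3DSTATE_URB_VS / 3DSTATE_CONSTANT_VS ...
 */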

/**
 * From the PRM, Volume 2a:
 *
 *    "Indirect State Pointers Disable
 *
 *    At the completion of the post-sync operation associated with this pipe
 *    control packet, the indirect state pointers in the hardware are
 *    considered invalid; the indirect pointers are not saved in the context.
 *    If any new indirect state commands are executed in the command stream
 *    while the pipe control is pending, the new indirect state commands are
 *    preserved.
 *
 *    [DevIVB+]: Using Invalidate State Pointer (ISP) only inhibits context
 *    restoring of Push Constant (3DSTATE_CONSTANT_*) commands.  Push Constant
 *    commands are only considered as Indirect State Pointers.  Once ISP is
 *    issued in a context, SW must initialize by programming push constant
 *    commands for all the shaders (at least to zero length) before attempting
 *    any rendering operation for the same context."
 *
 * 3DSTATE_CONSTANT_* packets are restored during a context restore, even
 * though they point to a BO that has already been unreferenced at the end
 * of the previous batch buffer.  This has been fine so far since we are
 * protected by the scratch page (every address not covered by a BO should
 * be pointing to the scratch page).  But on CNL, it is causing a GPU hang
 * during context restore at the 3DSTATE_CONSTANT_* instruction.
 *
 * The "Indirect State Pointers Disable" flag in PIPE_CONTROL tells the
 * hardware to ignore previous 3DSTATE_CONSTANT_* packets during a context
 * restore, so the mentioned hang doesn't happen.  However, software must
 * program push constant commands for all stages prior to rendering
 * anything, so we flag them as dirty.
 *
 * Finally, we also stall at the pixel scoreboard to make sure the constants
 * have been loaded into the EUs prior to disabling the push constants, so
 * that we don't hang a previous 3DPRIMITIVE.
 */
void
gen10_emit_isp_disable(struct brw_context *brw)
{
   brw->vtbl.emit_raw_pipe_control(brw,
                                   PIPE_CONTROL_STALL_AT_SCOREBOARD |
                                   PIPE_CONTROL_CS_STALL,
                                   NULL, 0, 0);
   brw->vtbl.emit_raw_pipe_control(brw,
                                   PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE |
                                   PIPE_CONTROL_CS_STALL,
                                   NULL, 0, 0);

   brw->vs.base.push_constants_dirty = true;
   brw->tcs.base.push_constants_dirty = true;
   brw->tes.base.push_constants_dirty = true;
   brw->gs.base.push_constants_dirty = true;
   brw->wm.base.push_constants_dirty = true;
}
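
/* Illustrative sketch: a batch-finalization path on Gen10+ might emit this
 * just before ending the batch, so the next context restore doesn't replay
 * stale 3DSTATE_CONSTANT_* packets:
 *
 *    if (devinfo->gen >= 10)
 *       gen10_emit_isp_disable(brw);
 *    ... emit MI_BATCH_BUFFER_END ...
 */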

/**
 * Emit a PIPE_CONTROL command for gen7 with the CS Stall bit set.
 */
void
gen7_emit_cs_stall_flush(struct brw_context *brw)
{
   brw_emit_pipe_control_write(brw,
                               PIPE_CONTROL_CS_STALL
                               | PIPE_CONTROL_WRITE_IMMEDIATE,
                               brw->workaround_bo, 0, 0);
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6.  From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 *    [DevSNB-C+{W/A}] Before any depth stall flush (including those
 *    produced by non-pipelined state commands), software needs to first
 *    send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 *    0.
 *
 *    [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 *    =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 *    [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 *    BEFORE the pipe-control with a post-sync op and no write-cache
 *    flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *    "1 of the following must also be set:
 *     - Render Target Cache Flush Enable ([12] of DW1)
 *     - Depth Cache Flush Enable ([0] of DW1)
 *     - Stall at Pixel Scoreboard ([1] of DW1)
 *     - Depth Stall ([13] of DW1)
 *     - Post-Sync Operation ([13] of DW1)
 *     - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it.  Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either.  Notify enable is IRQs, which aren't
 * really our business.  That leaves only stall at scoreboard.
 */
void
brw_emit_post_sync_nonzero_flush(struct brw_context *brw)
{
   brw_emit_pipe_control_flush(brw,
                               PIPE_CONTROL_CS_STALL |
                               PIPE_CONTROL_STALL_AT_SCOREBOARD);

   brw_emit_pipe_control_write(brw, PIPE_CONTROL_WRITE_IMMEDIATE,
                               brw->workaround_bo, 0, 0);
}
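
/* Illustrative ordering sketch for Gen6: the non-zero post-sync flush must
 * come before the depth stall or write-cache flush that needs it, e.g.:
 *
 *    if (devinfo->gen == 6)
 *       brw_emit_post_sync_nonzero_flush(brw);
 *    brw_emit_depth_stall_flushes(brw);
 */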

/*
 * From Sandybridge PRM, volume 2, "1.7.2 End-of-Pipe Synchronization":
 *
 *    Write synchronization is a special case of end-of-pipe
 *    synchronization that requires that the render cache and/or depth
 *    related caches are flushed to memory, where the data will become
 *    globally visible.  This type of synchronization is required prior to
 *    SW (CPU) actually reading the result data from memory, or initiating
 *    an operation that will use as a read surface (such as a texture
 *    surface) a previous render target and/or depth/stencil buffer.
 *
 * From Haswell PRM, volume 2, part 1, "End-of-Pipe Synchronization":
 *
 *    Exercising the write cache flush bits (Render Target Cache Flush
 *    Enable, Depth Cache Flush Enable, DC Flush) in PIPE_CONTROL only
 *    ensures the write caches are flushed and doesn't guarantee the data
 *    is globally visible.
 *
 *    SW can track the completion of the end-of-pipe-synchronization by
 *    using "Notify Enable" and "PostSync Operation - Write Immediate
 *    Data" in the PIPE_CONTROL command.
 */
void
brw_emit_end_of_pipe_sync(struct brw_context *brw, uint32_t flags)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (devinfo->gen >= 6) {
      /* From Sandybridge PRM, volume 2, "1.7.3.1 Writing a Value to Memory":
       *
       *    "The most common action to perform upon reaching a
       *     synchronization point is to write a value out to memory.  An
       *     immediate value (included with the synchronization command) may
       *     be written."
       *
       * From Broadwell PRM, volume 7, "End-of-Pipe Synchronization":
       *
       *    "In case the data flushed out by the render engine is to be read
       *     back in to the render engine in coherent manner, then the render
       *     engine has to wait for the fence completion before accessing the
       *     flushed data.  This can be achieved by following means on various
       *     products: PIPE_CONTROL command with CS Stall and the required
       *     write caches flushed with Post-Sync-Operation as Write Immediate
       *     Data.
       *
       *     Example:
       *        - Workload-1 (3D/GPGPU/MEDIA)
       *        - PIPE_CONTROL (CS Stall, Post-Sync-Operation Write Immediate
       *          Data, Required Write Cache Flush bits set)
       *        - Workload-2 (Can use the data produced or output by
       *          Workload-1)"
       */
      brw_emit_pipe_control_write(brw,
                                  flags | PIPE_CONTROL_CS_STALL |
                                  PIPE_CONTROL_WRITE_IMMEDIATE,
                                  brw->workaround_bo, 0, 0);

      if (devinfo->is_haswell) {
         /* Haswell needs additional workarounds:
          *
          * From Haswell PRM, volume 2, part 1, "End-of-Pipe Synchronization":
          *
          *    Option 1:
          *    PIPE_CONTROL command with the CS Stall and the required write
          *    caches flushed with Post-SyncOperation as Write Immediate Data
          *    followed by eight dummy MI_STORE_DATA_IMM (write to scratch
          *    space) commands.
          *
          *    Example:
          *       - Workload-1
          *       - PIPE_CONTROL (CS Stall, Post-Sync-Operation Write
          *         Immediate Data, Required Write Cache Flush bits set)
          *       - MI_STORE_DATA_IMM (8 times) (Dummy data, Scratch Address)
          *       - Workload-2 (Can use the data produced or output by
          *         Workload-1)
          *
          * Unfortunately, both the PRMs and the internal docs are a bit
          * out-of-date in this regard.  What the windows driver does (and
          * this appears to actually work) is to emit a register read from
          * the memory address written by the pipe control above.
          *
          * What register we load into doesn't matter.  We choose an indirect
          * rendering register because we know it always exists and it's one
          * of the first registers the command parser allows us to write.  If
          * you don't have command parser support in your kernel (pre-4.2),
          * this will get turned into MI_NOOP and you won't get the
          * workaround.  Unfortunately, there's just not much we can do in
          * that case.  This register is perfectly safe to write since we
          * always re-load all of the indirect draw registers right before
          * 3DPRIMITIVE when needed anyway.
          */
         brw_load_register_mem(brw, GEN7_3DPRIM_START_INSTANCE,
                               brw->workaround_bo, 0);
      }
   } else {
      /* On gen4-5, a regular pipe control seems to suffice. */
      brw_emit_pipe_control_flush(brw, flags);
   }
}
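
/* Illustrative sketch: before the CPU maps and reads back data the GPU has
 * rendered, a driver path would make the writes globally visible with an
 * end-of-pipe sync covering the relevant write caches:
 *
 *    brw_emit_end_of_pipe_sync(brw, PIPE_CONTROL_RENDER_TARGET_FLUSH |
 *                                   PIPE_CONTROL_DEPTH_CACHE_FLUSH);
 *    intel_batchbuffer_flush(brw);
 *    ... map the BO and read the results ...
 */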

/* Emit a pipelined flush to either flush the render and texture caches
 * for reading from an FBO-drawn texture, or flush so that frontbuffer
 * rendering appears on the screen in DRI1.
 *
 * This is also used for the always_flush_cache driconf debug option.
 */
void
brw_emit_mi_flush(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   int flags = PIPE_CONTROL_RENDER_TARGET_FLUSH;
   if (devinfo->gen >= 6) {
      flags |= PIPE_CONTROL_INSTRUCTION_INVALIDATE |
               PIPE_CONTROL_CONST_CACHE_INVALIDATE |
               PIPE_CONTROL_DATA_CACHE_FLUSH |
               PIPE_CONTROL_DEPTH_CACHE_FLUSH |
               PIPE_CONTROL_VF_CACHE_INVALIDATE |
               PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
               PIPE_CONTROL_CS_STALL;
   }
   brw_emit_pipe_control_flush(brw, flags);
}
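
/* Illustrative sketch: with the always_flush_cache driconf option enabled,
 * the draw path would call this around every primitive:
 *
 *    if (brw->always_flush_cache)
 *       brw_emit_mi_flush(brw);
 */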

int
brw_init_pipe_control(struct brw_context *brw,
                      const struct gen_device_info *devinfo)
{
   switch (devinfo->gen) {
   case 11:
      brw->vtbl.emit_raw_pipe_control = gen11_emit_raw_pipe_control;
      break;
   case 10:
      brw->vtbl.emit_raw_pipe_control = gen10_emit_raw_pipe_control;
      break;
   case 9:
      brw->vtbl.emit_raw_pipe_control = gen9_emit_raw_pipe_control;
      break;
   case 8:
      brw->vtbl.emit_raw_pipe_control = gen8_emit_raw_pipe_control;
      break;
   case 7:
      brw->vtbl.emit_raw_pipe_control =
         devinfo->is_haswell ? gen75_emit_raw_pipe_control
                             : gen7_emit_raw_pipe_control;
      break;
   case 6:
      brw->vtbl.emit_raw_pipe_control = gen6_emit_raw_pipe_control;
      break;
   case 5:
      brw->vtbl.emit_raw_pipe_control = gen5_emit_raw_pipe_control;
      break;
   case 4:
      brw->vtbl.emit_raw_pipe_control =
         devinfo->is_g4x ? gen45_emit_raw_pipe_control
                         : gen4_emit_raw_pipe_control;
      break;
   }

   if (devinfo->gen < 6)
      return 0;

   /* We can't just use brw_state_batch to get a chunk of space for
    * the gen6 workaround because it involves actually writing to
    * the buffer, and the kernel doesn't let us write to the batch.
    */
   brw->workaround_bo = brw_bo_alloc(brw->bufmgr, "workaround", 4096,
                                     BRW_MEMZONE_OTHER);
   if (brw->workaround_bo == NULL)
      return -ENOMEM;

   brw->pipe_controls_since_last_cs_stall = 0;

   return 0;
}
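
/* Illustrative sketch (hypothetical caller): context creation would check
 * the return value and tear down on allocation failure:
 *
 *    if (brw_init_pipe_control(brw, devinfo)) {
 *       ... free the partially-initialized context, report out-of-memory ...
 *    }
 */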

void
brw_fini_pipe_control(struct brw_context *brw)
{
   brw_bo_unreference(brw->workaround_bo);
}