i965: Move intel_region_get_aligned_offset() to be a miptree function.
[mesa.git] / src/mesa/drivers/dri/i965/brw_misc_state.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */



#include "intel_batchbuffer.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"

#include "main/fbobject.h"
#include "main/glformats.h"

/* Constant single cliprect for framebuffer object or DRI2 drawing */
static void upload_drawing_rect(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* 3DSTATE_DRAWING_RECTANGLE is non-pipelined. */
   if (brw->gen == 6)
      intel_emit_post_sync_nonzero_flush(brw);
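
   /* The length field of these 3D commands is the total DWord count minus
    * two, hence "(4 - 2)" for this four-DWord packet. The xmax/ymax in DW2
    * are inclusive, which is why Width - 1 and Height - 1 are programmed.
    */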
   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_DRAWING_RECTANGLE << 16 | (4 - 2));
   OUT_BATCH(0); /* xmin, ymin */
   OUT_BATCH(((ctx->DrawBuffer->Width - 1) & 0xffff) |
             ((ctx->DrawBuffer->Height - 1) << 16));
   OUT_BATCH(0);
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_drawing_rect = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_drawing_rect
};

/**
 * Upload pointers to the per-stage state.
 *
 * The state pointers in this packet are all relative to the general state
 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
 */
static void upload_pipelined_state_pointers(struct brw_context *brw)
{
   if (brw->gen == 5) {
      /* Need to flush before changing clip max threads for errata. */
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }
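
   /* Bit 0 of the GS and CLIP unit pointers emitted below is that unit's
    * enable bit, which is why those state offsets are OR'd with 1.
    */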
   BEGIN_BATCH(7);
   OUT_BATCH(_3DSTATE_PIPELINED_POINTERS << 16 | (7 - 2));
   OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->vs.base.state_offset);
   if (brw->ff_gs.prog_active)
      OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
                brw->ff_gs.state_offset | 1);
   else
      OUT_BATCH(0);
   OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->clip.state_offset | 1);
   OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->sf.state_offset);
   OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->wm.base.state_offset);
   OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->cc.state_offset);
   ADVANCE_BATCH();

   brw->state.dirty.brw |= BRW_NEW_PSP;
}

static void upload_psp_urb_cbs(struct brw_context *brw)
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_cs_urb_state(brw);
}

const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .mesa = 0,
      .brw = (BRW_NEW_URB_FENCE |
              BRW_NEW_BATCH |
              BRW_NEW_STATE_BASE_ADDRESS),
      .cache = (CACHE_NEW_VS_UNIT |
                CACHE_NEW_FF_GS_UNIT |
                CACHE_NEW_FF_GS_PROG |
                CACHE_NEW_CLIP_UNIT |
                CACHE_NEW_SF_UNIT |
                CACHE_NEW_WM_UNIT |
                CACHE_NEW_CC_UNIT)
   },
   .emit = upload_psp_urb_cbs,
};

uint32_t
brw_depthbuffer_format(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *drb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *srb;
   if (!drb &&
       (srb = intel_get_renderbuffer(fb, BUFFER_STENCIL)) &&
       !srb->mt->stencil_mt &&
       (intel_rb_format(srb) == MESA_FORMAT_Z24_UNORM_S8_UINT ||
        intel_rb_format(srb) == MESA_FORMAT_Z32_FLOAT_S8X24_UINT)) {
      drb = srb;
   }

   if (!drb)
      return BRW_DEPTHFORMAT_D32_FLOAT;

   return brw_depth_format(brw, drb->mt->format);
}

/**
 * Returns the mask of how many bits of x and y must be handled through the
 * depthbuffer's draw offset x and y fields.
 *
 * The draw offset x/y field of the depthbuffer packet is unfortunately shared
 * between the depth, hiz, and stencil buffers. Because it can be hard to get
 * all 3 to agree on this value, we want to do as much drawing offset
 * adjustment as possible by moving the base offset of the 3 buffers, which is
 * restricted to tile boundaries.
 *
 * For each buffer, the remainder must be applied through the x/y draw offset.
 * This returns the worst-case mask of the low bits that have to go into the
 * packet. If the 3 buffers don't agree on the drawing offset ANDed with this
 * mask, then we're in trouble.
 */
void
brw_get_depthstencil_tile_masks(struct intel_mipmap_tree *depth_mt,
                                uint32_t depth_level,
                                uint32_t depth_layer,
                                struct intel_mipmap_tree *stencil_mt,
                                uint32_t *out_tile_mask_x,
                                uint32_t *out_tile_mask_y)
{
   uint32_t tile_mask_x = 0, tile_mask_y = 0;

   if (depth_mt) {
      intel_miptree_get_tile_masks(depth_mt, &tile_mask_x, &tile_mask_y, false);

      if (intel_miptree_slice_has_hiz(depth_mt, depth_level, depth_layer)) {
         uint32_t hiz_tile_mask_x, hiz_tile_mask_y;
         intel_miptree_get_tile_masks(depth_mt->hiz_mt,
                                      &hiz_tile_mask_x, &hiz_tile_mask_y,
                                      false);

         /* Each HiZ row represents 2 rows of pixels */
         hiz_tile_mask_y = hiz_tile_mask_y << 1 | 1;

         tile_mask_x |= hiz_tile_mask_x;
         tile_mask_y |= hiz_tile_mask_y;
      }
   }

   if (stencil_mt) {
      if (stencil_mt->stencil_mt)
         stencil_mt = stencil_mt->stencil_mt;

      if (stencil_mt->format == MESA_FORMAT_S_UINT8) {
         /* Separate stencil buffer uses 64x64 tiles. */
         tile_mask_x |= 63;
         tile_mask_y |= 63;
      } else {
         uint32_t stencil_tile_mask_x, stencil_tile_mask_y;
         intel_miptree_get_tile_masks(stencil_mt,
                                      &stencil_tile_mask_x,
                                      &stencil_tile_mask_y, false);

         tile_mask_x |= stencil_tile_mask_x;
         tile_mask_y |= stencil_tile_mask_y;
      }
   }

   *out_tile_mask_x = tile_mask_x;
   *out_tile_mask_y = tile_mask_y;
}

static struct intel_mipmap_tree *
get_stencil_miptree(struct intel_renderbuffer *irb)
{
   if (!irb)
      return NULL;
   if (irb->mt->stencil_mt)
      return irb->mt->stencil_mt;
   return irb->mt;
}

void
brw_workaround_depthstencil_alignment(struct brw_context *brw,
                                      GLbitfield clear_mask)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   bool rebase_depth = false;
   bool rebase_stencil = false;
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = NULL;
   struct intel_mipmap_tree *stencil_mt = get_stencil_miptree(stencil_irb);
   uint32_t tile_x = 0, tile_y = 0, stencil_tile_x = 0, stencil_tile_y = 0;
   uint32_t stencil_draw_x = 0, stencil_draw_y = 0;
   bool invalidate_depth = clear_mask & BUFFER_BIT_DEPTH;
   bool invalidate_stencil = clear_mask & BUFFER_BIT_STENCIL;

   if (depth_irb)
      depth_mt = depth_irb->mt;

   /* Initialize brw->depthstencil to 'nop' workaround state.
    */
   brw->depthstencil.tile_x = 0;
   brw->depthstencil.tile_y = 0;
   brw->depthstencil.depth_offset = 0;
   brw->depthstencil.stencil_offset = 0;
   brw->depthstencil.hiz_offset = 0;
   brw->depthstencil.depth_mt = NULL;
   brw->depthstencil.stencil_mt = NULL;
   if (depth_irb)
      brw->depthstencil.depth_mt = depth_mt;
   if (stencil_irb)
      brw->depthstencil.stencil_mt = get_stencil_miptree(stencil_irb);

   /* Gen7+ doesn't require the workarounds, since we always program the
    * surface state at the start of the whole surface.
    */
   if (brw->gen >= 7)
      return;

   /* Check if depth buffer is in depth/stencil format. If so, then it's only
    * safe to invalidate it if we're also clearing stencil, and both depth_irb
    * and stencil_irb point to the same miptree.
    *
    * Note: it's not sufficient to check for the case where
    * _mesa_get_format_base_format(depth_mt->format) == GL_DEPTH_STENCIL,
    * because this fails to catch depth/stencil buffers on hardware that uses
    * separate stencil. To catch that case, we check whether
    * depth_mt->stencil_mt is non-NULL.
    */
   if (depth_irb && invalidate_depth &&
       (_mesa_get_format_base_format(depth_mt->format) == GL_DEPTH_STENCIL ||
        depth_mt->stencil_mt)) {
      invalidate_depth = invalidate_stencil && depth_irb && stencil_irb
         && depth_irb->mt == stencil_irb->mt;
   }

   uint32_t tile_mask_x, tile_mask_y;
   brw_get_depthstencil_tile_masks(depth_mt,
                                   depth_mt ? depth_irb->mt_level : 0,
                                   depth_mt ? depth_irb->mt_layer : 0,
                                   stencil_mt,
                                   &tile_mask_x, &tile_mask_y);

   if (depth_irb) {
      tile_x = depth_irb->draw_x & tile_mask_x;
      tile_y = depth_irb->draw_y & tile_mask_y;

      /* According to the Sandy Bridge PRM, volume 2 part 1, pp326-327
       * (3DSTATE_DEPTH_BUFFER dw5), in the documentation for "Depth
       * Coordinate Offset X/Y":
       *
       *    "The 3 LSBs of both offsets must be zero to ensure correct
       *    alignment"
       */
      if (tile_x & 7 || tile_y & 7)
         rebase_depth = true;

      /* We didn't even have intra-tile offsets before g45. */
      if (!brw->has_surface_tile_offset) {
         if (tile_x || tile_y)
            rebase_depth = true;
      }

      if (rebase_depth) {
         perf_debug("HW workaround: blitting depth level %d to a temporary "
                    "to fix alignment (depth tile offset %d,%d)\n",
                    depth_irb->mt_level, tile_x, tile_y);
         intel_renderbuffer_move_to_temp(brw, depth_irb, invalidate_depth);
         /* In the case of stencil_irb being the same packed depth/stencil
          * texture but not the same rb, make it point at our rebased mt, too.
          */
         if (stencil_irb &&
             stencil_irb != depth_irb &&
             stencil_irb->mt == depth_mt) {
            intel_miptree_reference(&stencil_irb->mt, depth_irb->mt);
            intel_renderbuffer_set_draw_offset(stencil_irb);
         }

         stencil_mt = get_stencil_miptree(stencil_irb);

         tile_x = depth_irb->draw_x & tile_mask_x;
         tile_y = depth_irb->draw_y & tile_mask_y;
      }

      if (stencil_irb) {
         stencil_mt = get_stencil_miptree(stencil_irb);
         intel_miptree_get_image_offset(stencil_mt,
                                        stencil_irb->mt_level,
                                        stencil_irb->mt_layer,
                                        &stencil_draw_x, &stencil_draw_y);
         int stencil_tile_x = stencil_draw_x & tile_mask_x;
         int stencil_tile_y = stencil_draw_y & tile_mask_y;

         /* If stencil doesn't match depth, then we'll need to rebase stencil
          * as well. (if we hadn't decided to rebase stencil before, the
          * post-stencil depth test will also rebase depth to try to match it
          * up).
          */
         if (tile_x != stencil_tile_x ||
             tile_y != stencil_tile_y) {
            rebase_stencil = true;
         }
      }
   }

   /* If we have (just) stencil, check it for ignored low bits as well */
   if (stencil_irb) {
      intel_miptree_get_image_offset(stencil_mt,
                                     stencil_irb->mt_level,
                                     stencil_irb->mt_layer,
                                     &stencil_draw_x, &stencil_draw_y);
      stencil_tile_x = stencil_draw_x & tile_mask_x;
      stencil_tile_y = stencil_draw_y & tile_mask_y;

      if (stencil_tile_x & 7 || stencil_tile_y & 7)
         rebase_stencil = true;

      if (!brw->has_surface_tile_offset) {
         if (stencil_tile_x || stencil_tile_y)
            rebase_stencil = true;
      }
   }

   if (rebase_stencil) {
      perf_debug("HW workaround: blitting stencil level %d to a temporary "
                 "to fix alignment (stencil tile offset %d,%d)\n",
                 stencil_irb->mt_level, stencil_tile_x, stencil_tile_y);

      intel_renderbuffer_move_to_temp(brw, stencil_irb, invalidate_stencil);
      stencil_mt = get_stencil_miptree(stencil_irb);

      intel_miptree_get_image_offset(stencil_mt,
                                     stencil_irb->mt_level,
                                     stencil_irb->mt_layer,
                                     &stencil_draw_x, &stencil_draw_y);
      stencil_tile_x = stencil_draw_x & tile_mask_x;
      stencil_tile_y = stencil_draw_y & tile_mask_y;

      if (depth_irb && depth_irb->mt == stencil_irb->mt) {
         intel_miptree_reference(&depth_irb->mt, stencil_irb->mt);
         intel_renderbuffer_set_draw_offset(depth_irb);
      } else if (depth_irb && !rebase_depth) {
         if (tile_x != stencil_tile_x ||
             tile_y != stencil_tile_y) {
            perf_debug("HW workaround: blitting depth level %d to a temporary "
                       "to match stencil level %d alignment (depth tile offset "
                       "%d,%d, stencil offset %d,%d)\n",
                       depth_irb->mt_level,
                       stencil_irb->mt_level,
                       tile_x, tile_y,
                       stencil_tile_x, stencil_tile_y);

            intel_renderbuffer_move_to_temp(brw, depth_irb, invalidate_depth);

            tile_x = depth_irb->draw_x & tile_mask_x;
            tile_y = depth_irb->draw_y & tile_mask_y;

            if (stencil_irb && stencil_irb->mt == depth_mt) {
               intel_miptree_reference(&stencil_irb->mt, depth_irb->mt);
               intel_renderbuffer_set_draw_offset(stencil_irb);
            }

            WARN_ONCE(stencil_tile_x != tile_x ||
                      stencil_tile_y != tile_y,
                      "Rebased stencil tile offset (%d,%d) doesn't match depth "
                      "tile offset (%d,%d).\n",
                      stencil_tile_x, stencil_tile_y,
                      tile_x, tile_y);
         }
      }
   }

   if (!depth_irb) {
      tile_x = stencil_tile_x;
      tile_y = stencil_tile_y;
   }

   /* While we just tried to get everything aligned, we may have failed to do
    * so in the case of rendering to array or 3D textures, where nonzero faces
    * will still have an offset post-rebase. At least give an informative
    * warning.
    */
   WARN_ONCE((tile_x & 7) || (tile_y & 7),
             "Depth/stencil buffer needs alignment to 8-pixel boundaries.\n"
             "Truncating offset, bad rendering may occur.\n");
   tile_x &= ~7;
   tile_y &= ~7;
   /* Now, after rebasing, save off the new depthstencil state so the hardware
    * packets can just dereference that without re-calculating tile offsets.
    */
   brw->depthstencil.tile_x = tile_x;
   brw->depthstencil.tile_y = tile_y;
   if (depth_irb) {
      depth_mt = depth_irb->mt;
      brw->depthstencil.depth_mt = depth_mt;
      brw->depthstencil.depth_offset =
         intel_miptree_get_aligned_offset(depth_mt,
                                          depth_irb->draw_x & ~tile_mask_x,
                                          depth_irb->draw_y & ~tile_mask_y,
                                          false);
      if (intel_renderbuffer_has_hiz(depth_irb)) {
         brw->depthstencil.hiz_offset =
            intel_miptree_get_aligned_offset(depth_mt,
                                             depth_irb->draw_x & ~tile_mask_x,
                                             (depth_irb->draw_y & ~tile_mask_y) / 2,
                                             false);
      }
   }
   if (stencil_irb) {
      stencil_mt = get_stencil_miptree(stencil_irb);

      brw->depthstencil.stencil_mt = stencil_mt;
      if (stencil_mt->format == MESA_FORMAT_S_UINT8) {
         /* Note: we can't compute the stencil offset using
          * intel_region_get_aligned_offset(), because stencil_region claims
          * that the region is untiled even though it's W tiled.
          */
         brw->depthstencil.stencil_offset =
            (stencil_draw_y & ~tile_mask_y) * stencil_mt->region->pitch +
            (stencil_draw_x & ~tile_mask_x) * 64;
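         /* Given the W-tiled layout noted above, each 64x64-pixel tile at
          * one byte per pixel occupies 64 * 64 bytes, so stepping X by one
          * whole tile advances 64 * 64 bytes; that is where the "* 64"
          * factor comes from, while the pitch term covers whole tile rows.
          */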
      }
   }
}

void
brw_emit_depthbuffer(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   /* _NEW_BUFFERS */
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *depth_mt = brw->depthstencil.depth_mt;
   struct intel_mipmap_tree *stencil_mt = brw->depthstencil.stencil_mt;
   uint32_t tile_x = brw->depthstencil.tile_x;
   uint32_t tile_y = brw->depthstencil.tile_y;
   bool hiz = depth_irb && intel_renderbuffer_has_hiz(depth_irb);
   bool separate_stencil = false;
   uint32_t depth_surface_type = BRW_SURFACE_NULL;
   uint32_t depthbuffer_format = BRW_DEPTHFORMAT_D32_FLOAT;
   uint32_t depth_offset = 0;
   uint32_t width = 1, height = 1;

   if (stencil_mt) {
      separate_stencil = stencil_mt->format == MESA_FORMAT_S_UINT8;

      /* Gen7 supports only separate stencil */
      assert(separate_stencil || brw->gen < 7);
   }

   /* If there's a packed depth/stencil bound to stencil only, we need to
    * emit the packed depth/stencil buffer packet.
    */
   if (!depth_irb && stencil_irb && !separate_stencil) {
      depth_irb = stencil_irb;
      depth_mt = stencil_mt;
   }

   if (depth_irb && depth_mt) {
      /* When 3DSTATE_DEPTH_BUFFER.Separate_Stencil_Enable is set, then
       * 3DSTATE_DEPTH_BUFFER.Surface_Format is not permitted to be a packed
       * depthstencil format.
       *
       * Gens prior to 7 require that HiZ_Enable and Separate_Stencil_Enable be
       * set to the same value. Gens after 7 implicitly always set
       * Separate_Stencil_Enable; software cannot disable it.
       */
      if ((brw->gen < 7 && hiz) || brw->gen >= 7) {
         assert(!_mesa_is_format_packed_depth_stencil(depth_mt->format));
      }

      /* Prior to Gen7, if using separate stencil, hiz must be enabled. */
      assert(brw->gen >= 7 || !separate_stencil || hiz);

      assert(brw->gen < 6 || depth_mt->region->tiling == I915_TILING_Y);
      assert(!hiz || depth_mt->region->tiling == I915_TILING_Y);

      depthbuffer_format = brw_depthbuffer_format(brw);
      depth_surface_type = BRW_SURFACE_2D;
      depth_offset = brw->depthstencil.depth_offset;
      width = depth_irb->Base.Base.Width;
      height = depth_irb->Base.Base.Height;
   } else if (separate_stencil) {
      /*
       * There exists a separate stencil buffer but no depth buffer.
       *
       * The stencil buffer inherits most of its fields from
       * 3DSTATE_DEPTH_BUFFER: namely the tile walk, surface type, width, and
       * height.
       *
       * The tiled bit must be set. From the Sandybridge PRM, Volume 2, Part 1,
       * Section 7.5.5.1.1 3DSTATE_DEPTH_BUFFER, Bit 1.27 Tiled Surface:
       *    [DevGT+]: This field must be set to TRUE.
       */
      assert(brw->has_separate_stencil);

      depth_surface_type = BRW_SURFACE_2D;
      width = stencil_irb->Base.Base.Width;
      height = stencil_irb->Base.Base.Height;
   }

   if (depth_mt)
      brw_render_cache_set_check_flush(brw, depth_mt->region->bo);
   if (stencil_mt)
      brw_render_cache_set_check_flush(brw, stencil_mt->region->bo);

   brw->vtbl.emit_depth_stencil_hiz(brw, depth_mt, depth_offset,
                                    depthbuffer_format, depth_surface_type,
                                    stencil_mt, hiz, separate_stencil,
                                    width, height, tile_x, tile_y);
}

void
brw_emit_depth_stencil_hiz(struct brw_context *brw,
                           struct intel_mipmap_tree *depth_mt,
                           uint32_t depth_offset, uint32_t depthbuffer_format,
                           uint32_t depth_surface_type,
                           struct intel_mipmap_tree *stencil_mt,
                           bool hiz, bool separate_stencil,
                           uint32_t width, uint32_t height,
                           uint32_t tile_x, uint32_t tile_y)
{
   /* Enable the hiz bit if we're doing separate stencil, because it and the
    * separate stencil bit must have the same value. From Section 2.11.5.6.1.1
    * 3DSTATE_DEPTH_BUFFER, Bit 1.21 "Separate Stencil Enable":
    *    [DevIL]: If this field is enabled, Hierarchical Depth Buffer
    *    Enable must also be enabled.
    *
    *    [DevGT]: This field must be set to the same value (enabled or
    *    disabled) as Hierarchical Depth Buffer Enable
    */
   bool enable_hiz_ss = hiz || separate_stencil;


   /* 3DSTATE_DEPTH_BUFFER, 3DSTATE_STENCIL_BUFFER are both
    * non-pipelined state that will need the PIPE_CONTROL workaround.
    */
   if (brw->gen == 6) {
      intel_emit_post_sync_nonzero_flush(brw);
      intel_emit_depth_stall_flushes(brw);
   }
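
   /* The packet length varies by generation: G4x/Gen5 add the draw offset
    * DWord emitted near the end, and Gen6+ appends one more DWord, written
    * as zero below.
    */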
   unsigned int len;
   if (brw->gen >= 6)
      len = 7;
   else if (brw->is_g4x || brw->gen == 5)
      len = 6;
   else
      len = 5;

   BEGIN_BATCH(len);
   OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
   OUT_BATCH((depth_mt ? depth_mt->region->pitch - 1 : 0) |
             (depthbuffer_format << 18) |
             ((enable_hiz_ss ? 1 : 0) << 21) | /* separate stencil enable */
             ((enable_hiz_ss ? 1 : 0) << 22) | /* hiz enable */
             (BRW_TILEWALK_YMAJOR << 26) |
             ((depth_mt ? depth_mt->region->tiling != I915_TILING_NONE : 1)
              << 27) |
             (depth_surface_type << 29));

   if (depth_mt) {
      OUT_RELOC(depth_mt->region->bo,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                depth_offset);
   } else {
      OUT_BATCH(0);
   }

   OUT_BATCH(((width + tile_x - 1) << 6) |
             ((height + tile_y - 1) << 19));
   OUT_BATCH(0);

   if (brw->is_g4x || brw->gen >= 5)
      OUT_BATCH(tile_x | (tile_y << 16));
   else
      assert(tile_x == 0 && tile_y == 0);

   if (brw->gen >= 6)
      OUT_BATCH(0);

   ADVANCE_BATCH();

   if (hiz || separate_stencil) {
      /*
       * In the 3DSTATE_DEPTH_BUFFER batch emitted above, the 'separate
       * stencil enable' and 'hiz enable' bits were set. Therefore we must
       * emit 3DSTATE_HIER_DEPTH_BUFFER and 3DSTATE_STENCIL_BUFFER. Even if
       * there is no stencil buffer, 3DSTATE_STENCIL_BUFFER must be emitted;
       * failure to do so causes hangs on gen5 and a stall on gen6.
       */

      /* Emit hiz buffer. */
      if (hiz) {
         struct intel_mipmap_tree *hiz_mt = depth_mt->hiz_mt;
         BEGIN_BATCH(3);
         OUT_BATCH((_3DSTATE_HIER_DEPTH_BUFFER << 16) | (3 - 2));
         OUT_BATCH(hiz_mt->region->pitch - 1);
         OUT_RELOC(hiz_mt->region->bo,
                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                   brw->depthstencil.hiz_offset);
         ADVANCE_BATCH();
      } else {
         BEGIN_BATCH(3);
         OUT_BATCH((_3DSTATE_HIER_DEPTH_BUFFER << 16) | (3 - 2));
         OUT_BATCH(0);
         OUT_BATCH(0);
         ADVANCE_BATCH();
      }

      /* Emit stencil buffer. */
      if (separate_stencil) {
         struct intel_region *region = stencil_mt->region;

         BEGIN_BATCH(3);
         OUT_BATCH((_3DSTATE_STENCIL_BUFFER << 16) | (3 - 2));
         /* The stencil buffer has quirky pitch requirements. From Vol 2a,
          * 11.5.6.2.1 3DSTATE_STENCIL_BUFFER, field "Surface Pitch":
          *    The pitch must be set to 2x the value computed based on width, as
          *    the stencil buffer is stored with two rows interleaved.
          */
         OUT_BATCH(2 * region->pitch - 1);
         OUT_RELOC(region->bo,
                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                   brw->depthstencil.stencil_offset);
         ADVANCE_BATCH();
      } else {
         BEGIN_BATCH(3);
         OUT_BATCH((_3DSTATE_STENCIL_BUFFER << 16) | (3 - 2));
         OUT_BATCH(0);
         OUT_BATCH(0);
         ADVANCE_BATCH();
      }
   }

   /*
    * On Gen >= 6, emit clear params for safety. If using hiz, then clear
    * params must be emitted.
    *
    * From Section 2.11.5.6.4.1 3DSTATE_CLEAR_PARAMS:
    *    3DSTATE_CLEAR_PARAMS packet must follow the DEPTH_BUFFER_STATE packet
    *    when HiZ is enabled and the DEPTH_BUFFER_STATE changes.
    */
   if (brw->gen >= 6 || hiz) {
      if (brw->gen == 6)
         intel_emit_post_sync_nonzero_flush(brw);

      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_CLEAR_PARAMS << 16 |
                GEN5_DEPTH_CLEAR_VALID |
                (2 - 2));
      OUT_BATCH(depth_mt ? depth_mt->depth_clear_value : 0);
      ADVANCE_BATCH();
   }
}

const struct brw_tracked_state brw_depthbuffer = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH,
      .cache = 0,
   },
   .emit = brw_emit_depthbuffer,
};



/***********************************************************************
 * Polygon stipple packet
 */

static void upload_polygon_stipple(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   GLuint i;

   /* _NEW_POLYGON */
   if (!ctx->Polygon.StippleFlag)
      return;

   if (brw->gen == 6)
      intel_emit_post_sync_nonzero_flush(brw);

   BEGIN_BATCH(33);
   OUT_BATCH(_3DSTATE_POLY_STIPPLE_PATTERN << 16 | (33 - 2));

   /* Polygon stipple is provided in OpenGL order, i.e. bottom
    * row first. If we're rendering to a window (i.e. the
    * default frame buffer object, 0), then we need to invert
    * it to match our pixel layout. But if we're rendering
    * to a FBO (i.e. any named frame buffer object), we *don't*
    * need to invert - we already match the layout.
    */
   if (_mesa_is_winsys_fbo(ctx->DrawBuffer)) {
      for (i = 0; i < 32; i++)
         OUT_BATCH(ctx->PolygonStipple[31 - i]); /* invert */
   }
   else {
      for (i = 0; i < 32; i++)
         OUT_BATCH(ctx->PolygonStipple[i]);
   }
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_polygon_stipple = {
   .dirty = {
      .mesa = (_NEW_POLYGONSTIPPLE |
               _NEW_POLYGON),
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_polygon_stipple
};


/***********************************************************************
 * Polygon stipple offset packet
 */

static void upload_polygon_stipple_offset(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   /* _NEW_POLYGON */
   if (!ctx->Polygon.StippleFlag)
      return;

   if (brw->gen == 6)
      intel_emit_post_sync_nonzero_flush(brw);

   BEGIN_BATCH(2);
   OUT_BATCH(_3DSTATE_POLY_STIPPLE_OFFSET << 16 | (2-2));

   /* _NEW_BUFFERS
    *
    * If we're drawing to a system window we have to invert the Y axis
    * in order to match the OpenGL pixel coordinate system, and our
    * offset must be matched to the window position. If we're drawing
    * to a user-created FBO then our native pixel coordinate system
    * works just fine, and there's no window system to worry about.
    */
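   /* Only the offset modulo 32 matters, since the 32-row stipple pattern
    * simply repeats.
    */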
   if (_mesa_is_winsys_fbo(ctx->DrawBuffer))
      OUT_BATCH((32 - (ctx->DrawBuffer->Height & 31)) & 31);
   else
      OUT_BATCH(0);
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_polygon_stipple_offset = {
   .dirty = {
      .mesa = (_NEW_BUFFERS |
               _NEW_POLYGON),
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_polygon_stipple_offset
};

/**********************************************************************
 * AA Line parameters
 */
static void upload_aa_line_parameters(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;

   if (!ctx->Line.SmoothFlag)
      return;

   /* Original Gen4 doesn't have 3DSTATE_AA_LINE_PARAMETERS. */
   if (brw->gen == 4 && !brw->is_g4x)
      return;

   if (brw->gen == 6)
      intel_emit_post_sync_nonzero_flush(brw);

   BEGIN_BATCH(3);
   OUT_BATCH(_3DSTATE_AA_LINE_PARAMETERS << 16 | (3 - 2));
   /* use legacy aa line coverage computation */
   OUT_BATCH(0);
   OUT_BATCH(0);
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_aa_line_parameters = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_aa_line_parameters
};

/***********************************************************************
 * Line stipple packet
 */

static void upload_line_stipple(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   GLfloat tmp;
   GLint tmpi;

   if (!ctx->Line.StippleFlag)
      return;

   if (brw->gen == 6)
      intel_emit_post_sync_nonzero_flush(brw);
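
   /* DW1 holds the 16-bit stipple pattern; DW2 packs the repeat count
    * together with its reciprocal in fixed point, which the hardware uses
    * to step through the pattern.
    */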
   BEGIN_BATCH(3);
   OUT_BATCH(_3DSTATE_LINE_STIPPLE_PATTERN << 16 | (3 - 2));
   OUT_BATCH(ctx->Line.StipplePattern);

   if (brw->gen >= 7) {
      /* in U1.16 */
      tmp = 1.0 / (GLfloat) ctx->Line.StippleFactor;
      tmpi = tmp * (1<<16);
      OUT_BATCH(tmpi << 15 | ctx->Line.StippleFactor);
   }
   else {
      /* in U1.13 */
      tmp = 1.0 / (GLfloat) ctx->Line.StippleFactor;
      tmpi = tmp * (1<<13);
      OUT_BATCH(tmpi << 16 | ctx->Line.StippleFactor);
   }

   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_line_stipple = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_line_stipple
};


/***********************************************************************
 * Misc invariant state packets
 */

void
brw_upload_invariant_state(struct brw_context *brw)
{
   const bool is_965 = brw->gen == 4 && !brw->is_g4x;

   /* 3DSTATE_SIP, 3DSTATE_MULTISAMPLE, etc. are nonpipelined. */
   if (brw->gen == 6)
      intel_emit_post_sync_nonzero_flush(brw);

   /* Select the 3D pipeline (as opposed to media) */
   const uint32_t _3DSTATE_PIPELINE_SELECT =
      is_965 ? CMD_PIPELINE_SELECT_965 : CMD_PIPELINE_SELECT_GM45;
   BEGIN_BATCH(1);
   OUT_BATCH(_3DSTATE_PIPELINE_SELECT << 16 | 0);
   ADVANCE_BATCH();

   if (brw->gen < 6) {
      /* Disable depth offset clamping. */
      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP << 16 | (2 - 2));
      OUT_BATCH_F(0.0);
      ADVANCE_BATCH();
   }

   if (brw->gen >= 8) {
      BEGIN_BATCH(3);
      OUT_BATCH(CMD_STATE_SIP << 16 | (3 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(2);
      OUT_BATCH(CMD_STATE_SIP << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   const uint32_t _3DSTATE_VF_STATISTICS =
      is_965 ? GEN4_3DSTATE_VF_STATISTICS : GM45_3DSTATE_VF_STATISTICS;
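   /* The low bit of this packet enables vertex-fetch statistics gathering;
    * we only turn it on when INTEL_DEBUG=stats is set.
    */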
   BEGIN_BATCH(1);
   OUT_BATCH(_3DSTATE_VF_STATISTICS << 16 |
             (unlikely(INTEL_DEBUG & DEBUG_STATS) ? 1 : 0));
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_invariant_state = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = brw_upload_invariant_state
};

/**
 * Define the base addresses which some state is referenced from.
 *
 * This allows us to avoid having to emit relocations for the objects,
 * and is actually required for binding table pointers on gen6.
 *
 * Surface state base address covers binding table pointers and
 * surface state objects, but not the surfaces that the surface state
 * objects point to.
 */
static void upload_state_base_address(struct brw_context *brw)
{
   /* FINISHME: According to section 3.6.1 "STATE_BASE_ADDRESS" of
    * vol1a of the G45 PRM, MI_FLUSH with the ISC invalidate should be
    * programmed prior to STATE_BASE_ADDRESS.
    *
    * However, given that the instruction SBA (general state base
    * address) on this chipset is always set to 0 across X and GL,
    * maybe this isn't required for us in particular.
    */

   if (brw->gen >= 6) {
      uint8_t mocs = brw->gen == 7 ? GEN7_MOCS_L3 : 0;

      if (brw->gen == 6)
         intel_emit_post_sync_nonzero_flush(brw);

      BEGIN_BATCH(10);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
      OUT_BATCH(mocs << 8 | /* General State Memory Object Control State */
                mocs << 4 | /* Stateless Data Port Access Memory Object Control State */
                1); /* General State Base Address Modify Enable */
      /* Surface state base address:
       * BINDING_TABLE_STATE
       * SURFACE_STATE
       */
      OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0, 1);
      /* Dynamic state base address:
       * SAMPLER_STATE
       * SAMPLER_BORDER_COLOR_STATE
       * CLIP, SF, WM/CC viewport state
       * COLOR_CALC_STATE
       * DEPTH_STENCIL_STATE
       * BLEND_STATE
       * Push constants (when INSTPM: CONSTANT_BUFFER Address Offset
       * Disable is clear, which we rely on)
       */
      OUT_RELOC(brw->batch.bo, (I915_GEM_DOMAIN_RENDER |
                                I915_GEM_DOMAIN_INSTRUCTION), 0, 1);

      OUT_BATCH(1); /* Indirect object base address: MEDIA_OBJECT data */
      OUT_RELOC(brw->cache.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
                1); /* Instruction base address: shader kernels (incl. SIP) */

      OUT_BATCH(1); /* General state upper bound */
      /* Dynamic state upper bound. Although the documentation says that
       * programming it to zero will cause it to be ignored, that is a lie.
       * If this isn't programmed to a real bound, the sampler border color
       * pointer is rejected, causing border color to mysteriously fail.
       */
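      /* Bit 0 of each address/bound DWord is its "modify enable" bit, so
       * 0xfffff001 programs an upper bound of 0xfffff000 with that bit set.
       */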
      OUT_BATCH(0xfffff001);
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else if (brw->gen == 5) {
      BEGIN_BATCH(8);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
                1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_RELOC(brw->cache.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
                1); /* Instruction base address */
      OUT_BATCH(0xfffff001); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(6);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(brw->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
                1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      ADVANCE_BATCH();
   }

   /* According to section 3.6.1 of VOL1 of the 965 PRM,
    * STATE_BASE_ADDRESS updates require a reissue of:
    *
    * 3DSTATE_PIPELINE_POINTERS
    * 3DSTATE_BINDING_TABLE_POINTERS
    * MEDIA_STATE_POINTERS
    *
    * and this continues through Ironlake. The Sandy Bridge PRM, vol
    * 1 part 1 says that the following packets must be reissued:
    *
    * 3DSTATE_CC_POINTERS
    * 3DSTATE_BINDING_TABLE_POINTERS
    * 3DSTATE_SAMPLER_STATE_POINTERS
    * 3DSTATE_VIEWPORT_STATE_POINTERS
    * MEDIA_STATE_POINTERS
    *
    * Those are always reissued following SBA updates anyway (new
    * batch time), except in the case of the program cache BO
    * changing. Having a separate state flag makes the sequence more
    * obvious.
    */

   brw->state.dirty.brw |= BRW_NEW_STATE_BASE_ADDRESS;
}

const struct brw_tracked_state brw_state_base_address = {
   .dirty = {
      .mesa = 0,
      .brw = (BRW_NEW_BATCH |
              BRW_NEW_PROGRAM_CACHE),
      .cache = 0,
   },
   .emit = upload_state_base_address
};