/*
 * Copyright © 2011 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <assert.h>

#include "intel_batchbuffer.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_state.h"

#include "gen6_blorp.h"

/**
 * \name Constants for HiZ VBO
 * \{
 *
 * \see brw_context::hiz::vertex_bo
 */
#define GEN6_HIZ_NUM_VERTICES 3
#define GEN6_HIZ_NUM_VUE_ELEMS 8
#define GEN6_HIZ_VBO_SIZE (GEN6_HIZ_NUM_VERTICES \
                           * GEN6_HIZ_NUM_VUE_ELEMS \
                           * sizeof(float))
/** \} */

void
gen6_hiz_emit_batch_head(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;

   /* To ensure that the batch contains only the resolve, flush the batch
    * before beginning and after finishing emitting the resolve packets.
    *
    * Ideally, we would not need to flush for the resolve op. But, I suspect
    * that it's unsafe for CMD_PIPELINE_SELECT to occur multiple times in
    * a single batch, and there is no safe way to ensure that other than by
    * fencing the resolve with flushes. Ideally, we would just detect if
    * a batch is in progress and do the right thing, but that would require
    * the ability to *safely* access brw_context::state::dirty::brw
    * outside of the brw_upload_state() codepath.
    */
   intel_flush(ctx);

   /* CMD_PIPELINE_SELECT
    *
    * Select the 3D pipeline, as opposed to the media pipeline.
    */
   {
      BEGIN_BATCH(1);
      OUT_BATCH(brw->CMD_PIPELINE_SELECT << 16);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_MULTISAMPLE */
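   /* Program single-sample rendering: one sample per pixel, positioned at
    * the pixel center. The packet is one dword longer on Gen7 than on Gen6,
    * hence the length switch below.
    */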
   {
      int length = intel->gen == 7 ? 4 : 3;

      BEGIN_BATCH(length);
      OUT_BATCH(_3DSTATE_MULTISAMPLE << 16 | (length - 2));
      OUT_BATCH(MS_PIXEL_LOCATION_CENTER |
                MS_NUMSAMPLES_1);
      OUT_BATCH(0);
      if (length >= 4)
         OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_SAMPLE_MASK */
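   /* A sample mask of 1 enables only sample 0, consistent with the
    * single-sample 3DSTATE_MULTISAMPLE programming above.
    */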
   {
      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_SAMPLE_MASK << 16 | (2 - 2));
      OUT_BATCH(1);
      ADVANCE_BATCH();
   }

   /* CMD_STATE_BASE_ADDRESS
    *
    * From the Sandy Bridge PRM, Volume 1, Part 1, Table STATE_BASE_ADDRESS:
    *     The following commands must be reissued following any change to the
    *     base addresses:
    *         3DSTATE_CC_POINTERS
    *         3DSTATE_BINDING_TABLE_POINTERS
    *         3DSTATE_SAMPLER_STATE_POINTERS
    *         3DSTATE_VIEWPORT_STATE_POINTERS
    *         MEDIA_STATE_POINTERS
    */
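   /* Surface state and dynamic state are both emitted into the batchbuffer,
    * so SurfaceStateBaseAddress and DynamicStateBaseAddress point at the
    * batch BO; the relocation delta of 1 sets the Modify Enable bit. The
    * bare dwords of 1 likewise set only Modify Enable, programming a zero
    * base address or bound.
    */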
   {
      BEGIN_BATCH(10);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
      OUT_BATCH(1); /* GeneralStateBaseAddressModifyEnable */
      /* SurfaceStateBaseAddress */
      OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0, 1);
      /* DynamicStateBaseAddress */
      OUT_RELOC(intel->batch.bo, (I915_GEM_DOMAIN_RENDER |
                                  I915_GEM_DOMAIN_INSTRUCTION), 0, 1);
      OUT_BATCH(1); /* IndirectObjectBaseAddress */
      OUT_BATCH(1); /* InstructionBaseAddress */
      OUT_BATCH(1); /* GeneralStateUpperBound */
      OUT_BATCH(1); /* DynamicStateUpperBound */
      OUT_BATCH(1); /* IndirectObjectUpperBound */
      OUT_BATCH(1); /* InstructionAccessUpperBound */
      ADVANCE_BATCH();
   }
}

void
gen6_hiz_emit_vertices(struct brw_context *brw,
                       struct intel_mipmap_tree *mt,
                       unsigned int level,
                       unsigned int layer)
{
   struct intel_context *intel = &brw->intel;
   uint32_t vertex_offset;

   /* Set up the VBO for the rectangle primitive.
    *
    * A rectangle primitive (3DPRIM_RECTLIST) consists of only three
    * vertices. The vertices reside in screen space with DirectX coordinates
    * (that is, (0, 0) is the upper left corner).
    *
    *   v2 ------ implied
    *    |        |
    *    |        |
    *   v0 ----- v1
    *
    * Since the VS is disabled, the clipper loads each VUE directly from
    * the URB. This is controlled by the 3DSTATE_VERTEX_BUFFERS and
    * 3DSTATE_VERTEX_ELEMENTS packets below. The VUE contents are as follows:
    *   dw0: Reserved, MBZ.
    *   dw1: Render Target Array Index. The HiZ op does not use indexed
    *        vertices, so set the dword to 0.
    *   dw2: Viewport Index. The HiZ op disables viewport mapping and
    *        scissoring, so set the dword to 0.
    *   dw3: Point Width. The HiZ op does not emit the POINTLIST primitive,
    *        so set the dword to 0.
    *   dw4: Vertex Position X.
    *   dw5: Vertex Position Y.
    *   dw6: Vertex Position Z.
    *   dw7: Vertex Position W.
    *
    * For details, see the Sandybridge PRM, Volume 2, Part 1, Section 1.5.1
    * "Vertex URB Entry (VUE) Formats".
    */
   {
      const int width = mt->level[level].width;
      const int height = mt->level[level].height;
      float *vertex_data;

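      /* Each row below supplies dwords 0-7 of one VUE: dw0-dw3 are zero,
       * per the layout described above, and dw4-dw7 hold the screen-space
       * position (x, y, 0, 1).
       */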
      const float vertices[GEN6_HIZ_VBO_SIZE] = {
         /* v0 */ 0, 0, 0, 0,     0, height, 0, 1,
         /* v1 */ 0, 0, 0, 0, width, height, 0, 1,
         /* v2 */ 0, 0, 0, 0,     0,      0, 0, 1,
      };

      vertex_data = (float *) brw_state_batch(brw, AUB_TRACE_NO_TYPE,
                                              GEN6_HIZ_VBO_SIZE, 32,
                                              &vertex_offset);
      memcpy(vertex_data, vertices, GEN6_HIZ_VBO_SIZE);
   }

   /* 3DSTATE_VERTEX_BUFFERS */
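   /* A single buffer holds all three VUEs; the pitch is one full VUE
    * (8 floats). The end address points at the last byte of the buffer,
    * hence the "- 1" below.
    */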
   {
      const int num_buffers = 1;
      const int batch_length = 1 + 4 * num_buffers;

      uint32_t dw0 = GEN6_VB0_ACCESS_VERTEXDATA |
                     (GEN6_HIZ_NUM_VUE_ELEMS * sizeof(float)) << BRW_VB0_PITCH_SHIFT;

      if (intel->gen >= 7)
         dw0 |= GEN7_VB0_ADDRESS_MODIFYENABLE;

      BEGIN_BATCH(batch_length);
      OUT_BATCH((_3DSTATE_VERTEX_BUFFERS << 16) | (batch_length - 2));
      OUT_BATCH(dw0);
      /* start address */
      OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_VERTEX, 0,
                vertex_offset);
      /* end address */
      OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_VERTEX, 0,
                vertex_offset + GEN6_HIZ_VBO_SIZE - 1);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_VERTEX_ELEMENTS
    *
    * Fetch dwords 0 - 7 from each VUE. See the comments above where the
    * vertex data is written into the batch.
    */
   {
      const int num_elements = 2;
      const int batch_length = 1 + 2 * num_elements;

      BEGIN_BATCH(batch_length);
      OUT_BATCH((_3DSTATE_VERTEX_ELEMENTS << 16) | (batch_length - 2));
      /* Element 0 */
      OUT_BATCH(GEN6_VE0_VALID |
                BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT |
                0 << BRW_VE0_SRC_OFFSET_SHIFT);
      OUT_BATCH(BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_0_SHIFT |
                BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_1_SHIFT |
                BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_2_SHIFT |
                BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_3_SHIFT);
      /* Element 1 */
      OUT_BATCH(GEN6_VE0_VALID |
                BRW_SURFACEFORMAT_R32G32B32A32_FLOAT << BRW_VE0_FORMAT_SHIFT |
                16 << BRW_VE0_SRC_OFFSET_SHIFT);
      OUT_BATCH(BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_0_SHIFT |
                BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_1_SHIFT |
                BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_2_SHIFT |
                BRW_VE1_COMPONENT_STORE_SRC << BRW_VE1_COMPONENT_3_SHIFT);
      ADVANCE_BATCH();
   }
}

/**
 * \brief Execute a HiZ op on a miptree slice.
 *
 * To execute the HiZ op, this function manually constructs and emits a batch
 * to "draw" the HiZ op's rectangle primitive. The batchbuffer is flushed
 * before constructing and after emitting the batch.
 *
 * This function alters no GL state.
 *
 * For an overview of HiZ ops, see the following sections of the Sandy Bridge
 * PRM, Volume 1, Part 2:
 *   - 7.5.3.1 Depth Buffer Clear
 *   - 7.5.3.2 Depth Buffer Resolve
 *   - 7.5.3.3 Hierarchical Depth Buffer Resolve
 */
static void
gen6_hiz_exec(struct intel_context *intel,
              struct intel_mipmap_tree *mt,
              unsigned int level,
              unsigned int layer,
              enum gen6_hiz_op op)
{
   struct gl_context *ctx = &intel->ctx;
   struct brw_context *brw = brw_context(ctx);
   uint32_t draw_x, draw_y;
   uint32_t tile_mask_x, tile_mask_y;

   assert(op != GEN6_HIZ_OP_DEPTH_CLEAR); /* Not implemented yet. */
   assert(mt->hiz_mt != NULL);
   intel_miptree_check_level_layer(mt, level, layer);

   {
      /* Construct a dummy renderbuffer just to extract tile offsets. */
      struct intel_renderbuffer rb;
      rb.mt = mt;
      rb.mt_level = level;
      rb.mt_layer = layer;
      intel_renderbuffer_set_draw_offset(&rb);
      draw_x = rb.draw_x;
      draw_y = rb.draw_y;
   }

   /* Compute masks to determine how much of draw_x and draw_y should be
    * performed using the fine adjustment of "depth coordinate offset X/Y"
    * (dw5 of 3DSTATE_DEPTH_BUFFER). See the emit_depthbuffer() function for
    * details.
    */
   {
      uint32_t depth_mask_x, depth_mask_y, hiz_mask_x, hiz_mask_y;
      intel_region_get_tile_masks(mt->region, &depth_mask_x, &depth_mask_y);
      intel_region_get_tile_masks(mt->hiz_mt->region,
                                  &hiz_mask_x, &hiz_mask_y);

      /* Each HiZ row represents 2 rows of pixels */
      hiz_mask_y = hiz_mask_y << 1 | 1;

      tile_mask_x = depth_mask_x | hiz_mask_x;
      tile_mask_y = depth_mask_y | hiz_mask_y;
   }

   gen6_hiz_emit_batch_head(brw);
   gen6_hiz_emit_vertices(brw, mt, level, layer);

   /* 3DSTATE_URB
    *
    * Assign the entire URB to the VS. Even though the VS is disabled, URB
    * space is still needed because the clipper loads the VUEs from the URB.
    * From the Sandybridge PRM, Volume 2, Part 1, Section 3DSTATE,
    * Dword 1.15:0 "VS Number of URB Entries":
    *     This field is always used (even if VS Function Enable is DISABLED).
    *
    * The warning below appears in the PRM (Section 3DSTATE_URB), but we can
    * safely ignore it because this batch contains only one draw call.
    *     Because of URB corruption caused by allocating a previous GS unit
    *     URB entry to the VS unit, software is required to send a "GS NULL
    *     Fence" (Send URB fence with VS URB size == 1 and GS URB size == 0)
    *     plus a dummy DRAW call before any case where VS will be taking over
    *     GS URB space.
    */
   {
      BEGIN_BATCH(3);
      OUT_BATCH(_3DSTATE_URB << 16 | (3 - 2));
      OUT_BATCH(brw->urb.max_vs_entries << GEN6_URB_VS_ENTRIES_SHIFT);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_CC_STATE_POINTERS
    *
    * The pointer offsets are relative to
    * CMD_STATE_BASE_ADDRESS.DynamicStateBaseAddress.
    *
    * The HiZ op doesn't use BLEND_STATE or COLOR_CALC_STATE.
    */
   {
      uint32_t depthstencil_offset;
      gen6_hiz_emit_depth_stencil_state(brw, op, &depthstencil_offset);

      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_CC_STATE_POINTERS << 16 | (4 - 2));
      OUT_BATCH(1); /* BLEND_STATE offset */
      OUT_BATCH(depthstencil_offset | 1); /* DEPTH_STENCIL_STATE offset */
      OUT_BATCH(1); /* COLOR_CALC_STATE offset */
      ADVANCE_BATCH();
   }

   /* 3DSTATE_VS
    *
    * Disable vertex shader.
    */
   {
      /* From the BSpec, Volume 2a, Part 3 "Vertex Shader", Section
       * 3DSTATE_VS, Dword 5.0 "VS Function Enable":
       *     [DevSNB] A pipeline flush must be programmed prior to a
       *     3DSTATE_VS command that causes the VS Function Enable to toggle.
       *     Pipeline flush can be executed by sending a PIPE_CONTROL command
       *     with CS stall bit set and a post sync operation.
       */
      intel_emit_post_sync_nonzero_flush(intel);

      BEGIN_BATCH(6);
      OUT_BATCH(_3DSTATE_VS << 16 | (6 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_GS
    *
    * Disable the geometry shader.
    */
   {
      BEGIN_BATCH(7);
      OUT_BATCH(_3DSTATE_GS << 16 | (7 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_CLIP
    *
    * Disable the clipper.
    *
    * The HiZ op emits a rectangle primitive, which requires clipping to
    * be disabled. From page 10 of the Sandy Bridge PRM Volume 2 Part 1
    * Section 1.3 "3D Primitives Overview":
    *     RECTLIST:
    *     Either the CLIP unit should be DISABLED, or the CLIP unit's Clip
    *     Mode should be set to a value other than CLIPMODE_NORMAL.
    *
    * Also disable perspective divide. This doesn't change the clipper's
    * output, but does spare a few electrons.
    */
   {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_CLIP << 16 | (4 - 2));
      OUT_BATCH(0);
      OUT_BATCH(GEN6_CLIP_PERSPECTIVE_DIVIDE_DISABLE);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_SF
    *
    * Disable ViewportTransformEnable (dw2.1)
    *
    * From the SandyBridge PRM, Volume 2, Part 1, Section 1.3, "3D
    * Primitives Overview":
    *     RECTLIST: Viewport Mapping must be DISABLED (as is typical with the
    *     use of screen-space coordinates).
    *
    * A solid rectangle must be rendered, so set FrontFaceFillMode (dw2.4:3)
    * and BackFaceFillMode (dw2.5:6) to SOLID(0).
    *
    * From the Sandy Bridge PRM, Volume 2, Part 1, Section
    * 6.4.1.1 3DSTATE_SF, Field FrontFaceFillMode:
    *     SOLID: Any triangle or rectangle object found to be front-facing
    *     is rendered as a solid object. This setting is required when
    *     rendering rectangle (RECTLIST) objects.
    */
   {
      BEGIN_BATCH(20);
      OUT_BATCH(_3DSTATE_SF << 16 | (20 - 2));
      OUT_BATCH((1 - 1) << GEN6_SF_NUM_OUTPUTS_SHIFT | /* only position */
                1 << GEN6_SF_URB_ENTRY_READ_LENGTH_SHIFT |
                0 << GEN6_SF_URB_ENTRY_READ_OFFSET_SHIFT);
      for (int i = 0; i < 18; ++i)
         OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_WM
    *
    * Disable thread dispatch (dw5.19) and enable the HiZ op.
    *
    * Even though thread dispatch is disabled, max threads (dw5.25:31) must be
    * nonzero to prevent the GPU from hanging. See the valid ranges in the
    * BSpec, Volume 2a.11 Windower, Section 3DSTATE_WM, Dword 5.25:31
    * "Maximum Number Of Threads".
    */
   {
      uint32_t dw4 = 0;

      switch (op) {
      case GEN6_HIZ_OP_DEPTH_CLEAR:
         assert(!"not implemented");
         dw4 |= GEN6_WM_DEPTH_CLEAR;
         break;
      case GEN6_HIZ_OP_DEPTH_RESOLVE:
         dw4 |= GEN6_WM_DEPTH_RESOLVE;
         break;
      case GEN6_HIZ_OP_HIZ_RESOLVE:
         dw4 |= GEN6_WM_HIERARCHICAL_DEPTH_RESOLVE;
         break;
      default:
         assert(0);
         break;
      }

      BEGIN_BATCH(9);
      OUT_BATCH(_3DSTATE_WM << 16 | (9 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(dw4);
      OUT_BATCH((brw->max_wm_threads - 1) << GEN6_WM_MAX_THREADS_SHIFT);
      OUT_BATCH((1 - 1) << GEN6_WM_NUM_SF_OUTPUTS_SHIFT); /* only position */
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_DEPTH_BUFFER */
   {
      uint32_t width = mt->level[level].width;
      uint32_t height = mt->level[level].height;

      uint32_t tile_x = draw_x & tile_mask_x;
      uint32_t tile_y = draw_y & tile_mask_y;
      uint32_t offset = intel_region_get_aligned_offset(mt->region,
                                                        draw_x & ~tile_mask_x,
                                                        draw_y & ~tile_mask_y);

      /* According to the Sandy Bridge PRM, volume 2 part 1, pp326-327
       * (3DSTATE_DEPTH_BUFFER dw5), in the documentation for "Depth
       * Coordinate Offset X/Y":
       *
       *     "The 3 LSBs of both offsets must be zero to ensure correct
       *     alignment"
       *
       * We have no guarantee that tile_x and tile_y are correctly aligned,
       * since they are determined by the mipmap layout, which is only aligned
       * to multiples of 4.
       *
       * So, to avoid hanging the GPU, just smash the low order 3 bits of
       * tile_x and tile_y to 0. This is a temporary workaround until we come
       * up with a better solution.
       */
      tile_x &= ~7;
      tile_y &= ~7;

      uint32_t format;
      switch (mt->format) {
      case MESA_FORMAT_Z16:       format = BRW_DEPTHFORMAT_D16_UNORM; break;
      case MESA_FORMAT_Z32_FLOAT: format = BRW_DEPTHFORMAT_D32_FLOAT; break;
      case MESA_FORMAT_X8_Z24:    format = BRW_DEPTHFORMAT_D24_UNORM_X8_UINT; break;
      default:                    assert(0); break;
      }

      intel_emit_post_sync_nonzero_flush(intel);
      intel_emit_depth_stall_flushes(intel);

      BEGIN_BATCH(7);
      OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (7 - 2));
      OUT_BATCH(((mt->region->pitch * mt->region->cpp) - 1) |
                format << 18 |
                1 << 21 | /* separate stencil enable */
                1 << 22 | /* hiz enable */
                BRW_TILEWALK_YMAJOR << 26 |
                1 << 27 | /* y-tiled */
                BRW_SURFACE_2D << 29);
      OUT_RELOC(mt->region->bo,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                offset);
      OUT_BATCH(BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1 |
                (width + tile_x - 1) << 6 |
                (height + tile_y - 1) << 19);
      OUT_BATCH(0);
      OUT_BATCH(tile_x |
                tile_y << 16);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_HIER_DEPTH_BUFFER */
   {
      struct intel_region *hiz_region = mt->hiz_mt->region;
      uint32_t hiz_offset =
         intel_region_get_aligned_offset(hiz_region,
                                         draw_x & ~tile_mask_x,
                                         (draw_y & ~tile_mask_y) / 2);

      BEGIN_BATCH(3);
      OUT_BATCH((_3DSTATE_HIER_DEPTH_BUFFER << 16) | (3 - 2));
      OUT_BATCH(hiz_region->pitch * hiz_region->cpp - 1);
      OUT_RELOC(hiz_region->bo,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                hiz_offset);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_STENCIL_BUFFER */
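   /* The HiZ op neither reads nor writes stencil, so program a zeroed
    * (effectively disabled) stencil buffer.
    */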
   {
      BEGIN_BATCH(3);
      OUT_BATCH((_3DSTATE_STENCIL_BUFFER << 16) | (3 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_CLEAR_PARAMS
    *
    * From the Sandybridge PRM, Volume 2, Part 1, Section 3DSTATE_CLEAR_PARAMS:
    *    [DevSNB] 3DSTATE_CLEAR_PARAMS packet must follow the
    *    DEPTH_BUFFER_STATE packet when HiZ is enabled and the
    *    DEPTH_BUFFER_STATE changes.
    */
   {
      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_CLEAR_PARAMS << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DSTATE_DRAWING_RECTANGLE */
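   /* Program the drawing rectangle to the full extent of the miplevel so
    * that no part of the RECTLIST is discarded against it.
    */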
   {
      BEGIN_BATCH(4);
      OUT_BATCH(_3DSTATE_DRAWING_RECTANGLE << 16 | (4 - 2));
      OUT_BATCH(0);
      OUT_BATCH(((mt->level[level].width - 1) & 0xffff) |
                ((mt->level[level].height - 1) << 16));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* 3DPRIMITIVE */
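   /* Emit a non-indexed, sequential draw of the three-vertex RECTLIST:
    * start vertex 0, one instance.
    */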
   {
      BEGIN_BATCH(6);
      OUT_BATCH(CMD_3D_PRIM << 16 | (6 - 2) |
                _3DPRIM_RECTLIST << GEN4_3DPRIM_TOPOLOGY_TYPE_SHIFT |
                GEN4_3DPRIM_VERTEXBUFFER_ACCESS_SEQUENTIAL);
      OUT_BATCH(3); /* vertex count per instance */
      OUT_BATCH(0);
      OUT_BATCH(1); /* instance count */
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* See comments above at first invocation of intel_flush() in
    * gen6_hiz_emit_batch_head().
    */
   intel_flush(ctx);

   /* Be safe. */
   brw->state.dirty.brw = ~0;
   brw->state.dirty.cache = ~0;
}

/**
 * \param out_offset is relative to
 *        CMD_STATE_BASE_ADDRESS.DynamicStateBaseAddress.
 */
void
gen6_hiz_emit_depth_stencil_state(struct brw_context *brw,
                                  enum gen6_hiz_op op,
                                  uint32_t *out_offset)
{
   struct gen6_depth_stencil_state *state;
   state = (struct gen6_depth_stencil_state *)
           brw_state_batch(brw, AUB_TRACE_DEPTH_STENCIL_STATE,
                           sizeof(*state), 64,
                           out_offset);
   memset(state, 0, sizeof(*state));

   /* See the following sections of the Sandy Bridge PRM, Volume 1, Part 2:
    *   - 7.5.3.1 Depth Buffer Clear
    *   - 7.5.3.2 Depth Buffer Resolve
    *   - 7.5.3.3 Hierarchical Depth Buffer Resolve
    */
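   /* Per the sections cited above, every HiZ op is performed with depth
    * writes enabled. The depth resolve additionally requires the depth test
    * enabled with a test function of NEVER; the HiZ resolve and the depth
    * clear leave the depth test disabled.
    */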
   state->ds2.depth_write_enable = 1;
   if (op == GEN6_HIZ_OP_DEPTH_RESOLVE) {
      state->ds2.depth_test_enable = 1;
      state->ds2.depth_test_func = COMPAREFUNC_NEVER;
   }
}

/** \see intel_context::vtbl::resolve_hiz_slice */
void
gen6_resolve_hiz_slice(struct intel_context *intel,
                       struct intel_mipmap_tree *mt,
                       uint32_t level,
                       uint32_t layer)
{
   gen6_hiz_exec(intel, mt, level, layer, GEN6_HIZ_OP_HIZ_RESOLVE);
}

/** \see intel_context::vtbl::resolve_depth_slice */
void
gen6_resolve_depth_slice(struct intel_context *intel,
                         struct intel_mipmap_tree *mt,
                         uint32_t level,
                         uint32_t layer)
{
   gen6_hiz_exec(intel, mt, level, layer, GEN6_HIZ_OP_DEPTH_RESOLVE);
}