i965/gen7: Fix depth buffer rendering to tile offsets.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_misc_state.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */


#include "intel_batchbuffer.h"
#include "intel_fbo.h"
#include "intel_mipmap_tree.h"
#include "intel_regions.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"

/* Constant single cliprect for framebuffer object or DRI2 drawing */
static void upload_drawing_rect(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_DRAWING_RECTANGLE << 16 | (4 - 2));
   OUT_BATCH(0); /* xmin, ymin */
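   /* Illustrative example: a 1920x1080 drawbuffer packs the inclusive
    * xmax = 1919 into bits 15:0 and ymax = 1079 into bits 31:16,
    * giving 0x0437077f.
    */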
   OUT_BATCH(((ctx->DrawBuffer->Width - 1) & 0xffff) |
             ((ctx->DrawBuffer->Height - 1) << 16));
   OUT_BATCH(0);
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_drawing_rect = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_drawing_rect
};

/**
 * Upload the binding table pointers, which point to each stage's array
 * of surface state pointers.
 *
 * The binding table pointers are relative to the surface state base
 * address, which points at the batchbuffer containing the streamed
 * batch state.
 */
static void upload_binding_table_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(6);
   OUT_BATCH(_3DSTATE_BINDING_TABLE_POINTERS << 16 | (6 - 2));
   OUT_BATCH(brw->bind.bo_offset);
   OUT_BATCH(0); /* gs */
   OUT_BATCH(0); /* clip */
   OUT_BATCH(0); /* sf */
   OUT_BATCH(brw->bind.bo_offset);
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_binding_table_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = (BRW_NEW_BATCH |
              BRW_NEW_STATE_BASE_ADDRESS |
              BRW_NEW_VS_BINDING_TABLE |
              BRW_NEW_GS_BINDING_TABLE |
              BRW_NEW_PS_BINDING_TABLE),
      .cache = 0,
   },
   .emit = upload_binding_table_pointers,
};

/**
 * Upload the Gen6 binding table pointers, which point to each stage's
 * array of surface state pointers.  A single packet updates the VS, GS,
 * and PS pointers, selected by the per-stage modify bits in the header.
 *
 * The binding table pointers are relative to the surface state base
 * address, which points at the batchbuffer containing the streamed
 * batch state.
 */
static void upload_gen6_binding_table_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_BINDING_TABLE_POINTERS << 16 |
             GEN6_BINDING_TABLE_MODIFY_VS |
             GEN6_BINDING_TABLE_MODIFY_GS |
             GEN6_BINDING_TABLE_MODIFY_PS |
             (4 - 2));
   OUT_BATCH(brw->bind.bo_offset); /* vs */
   OUT_BATCH(brw->bind.bo_offset); /* gs */
   OUT_BATCH(brw->bind.bo_offset); /* wm/ps */
   ADVANCE_BATCH();
}

const struct brw_tracked_state gen6_binding_table_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = (BRW_NEW_BATCH |
              BRW_NEW_STATE_BASE_ADDRESS |
              BRW_NEW_VS_BINDING_TABLE |
              BRW_NEW_GS_BINDING_TABLE |
              BRW_NEW_PS_BINDING_TABLE),
      .cache = 0,
   },
   .emit = upload_gen6_binding_table_pointers,
};

/**
 * Upload pointers to the per-stage state.
 *
 * The state pointers in this packet are all relative to the general state
 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
 */
static void upload_pipelined_state_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   if (intel->gen == 5) {
      /* Need to flush before changing clip max threads for errata. */
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }

   BEGIN_BATCH(7);
   OUT_BATCH(_3DSTATE_PIPELINED_POINTERS << 16 | (7 - 2));
   OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->vs.state_offset);
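   /* Bit 0 of the GS and CLIP pointers below is the unit enable bit:
    * the clip unit is always enabled, while the GS unit is enabled
    * only when a GS program is active.
    */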
   if (brw->gs.prog_active)
      OUT_RELOC(brw->intel.batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
                brw->gs.state_offset | 1);
   else
      OUT_BATCH(0);
   OUT_RELOC(brw->intel.batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->clip.state_offset | 1);
   OUT_RELOC(brw->intel.batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->sf.state_offset);
   OUT_RELOC(brw->intel.batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->wm.state_offset);
   OUT_RELOC(brw->intel.batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->cc.state_offset);
   ADVANCE_BATCH();

   brw->state.dirty.brw |= BRW_NEW_PSP;
}

static void upload_psp_urb_cbs(struct brw_context *brw)
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_cs_urb_state(brw);
}

const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .mesa = 0,
      .brw = (BRW_NEW_URB_FENCE |
              BRW_NEW_BATCH |
              BRW_NEW_STATE_BASE_ADDRESS),
      .cache = (CACHE_NEW_VS_UNIT |
                CACHE_NEW_GS_UNIT |
                CACHE_NEW_GS_PROG |
                CACHE_NEW_CLIP_UNIT |
                CACHE_NEW_SF_UNIT |
                CACHE_NEW_WM_UNIT |
                CACHE_NEW_CC_UNIT)
   },
   .emit = upload_psp_urb_cbs,
};

uint32_t
brw_depthbuffer_format(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   struct intel_renderbuffer *drb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *srb;

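   /* If only a stencil attachment is bound, but it is backed by a
    * packed depth/stencil miptree (no separate S8), derive the depth
    * format from that buffer instead.
    */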
   if (!drb &&
       (srb = intel_get_renderbuffer(fb, BUFFER_STENCIL)) &&
       !srb->mt->stencil_mt &&
       (srb->Base.Format == MESA_FORMAT_S8_Z24 ||
        srb->Base.Format == MESA_FORMAT_Z32_FLOAT_X24S8)) {
      drb = srb;
   }

   if (!drb)
      return BRW_DEPTHFORMAT_D32_FLOAT;

   switch (drb->mt->format) {
   case MESA_FORMAT_Z16:
      return BRW_DEPTHFORMAT_D16_UNORM;
   case MESA_FORMAT_Z32_FLOAT:
      return BRW_DEPTHFORMAT_D32_FLOAT;
   case MESA_FORMAT_X8_Z24:
      if (intel->gen >= 5)
         return BRW_DEPTHFORMAT_D24_UNORM_X8_UINT;
      else /* Gen4 doesn't support X8; use S8 instead. */
         return BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
   case MESA_FORMAT_S8_Z24:
      return BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
   case MESA_FORMAT_Z32_FLOAT_X24S8:
      return BRW_DEPTHFORMAT_D32_FLOAT_S8X24_UINT;
   default:
      _mesa_problem(ctx, "Unexpected depth format %s\n",
                    _mesa_get_format_name(drb->Base.Format));
      return BRW_DEPTHFORMAT_D16_UNORM;
   }
}

static void emit_depthbuffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;
   struct gl_framebuffer *fb = ctx->DrawBuffer;
   /* _NEW_BUFFERS */
   struct intel_renderbuffer *depth_irb = intel_get_renderbuffer(fb, BUFFER_DEPTH);
   struct intel_renderbuffer *stencil_irb = intel_get_renderbuffer(fb, BUFFER_STENCIL);
   struct intel_mipmap_tree *stencil_mt = NULL;
   struct intel_region *hiz_region = NULL;
   unsigned int len;
   bool separate_stencil = false;

   if (depth_irb &&
       depth_irb->mt &&
       depth_irb->mt->hiz_mt) {
      hiz_region = depth_irb->mt->hiz_mt->region;
   }

   /* 3DSTATE_DEPTH_BUFFER, 3DSTATE_STENCIL_BUFFER are both
    * non-pipelined state that will need the PIPE_CONTROL workaround.
    */
   if (intel->gen == 6) {
      intel_emit_post_sync_nonzero_flush(intel);
      intel_emit_depth_stall_flushes(intel);
   }

   /* Find the real separate stencil mt if present. */
   if (stencil_irb) {
      stencil_mt = stencil_irb->mt;
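      /* A packed depth/stencil miptree keeps its actual S8 data in
       * mt->stencil_mt when separate stencil is in use.
       */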
      if (stencil_mt->stencil_mt)
         stencil_mt = stencil_mt->stencil_mt;

      if (stencil_mt->format == MESA_FORMAT_S8)
         separate_stencil = true;
   }

   /* If there's a packed depth/stencil bound to stencil only, we need to
    * emit the packed depth/stencil buffer packet.
    */
   if (!depth_irb && stencil_irb && !separate_stencil)
      depth_irb = stencil_irb;

   if (intel->gen >= 6)
      len = 7;
   else if (intel->is_g4x || intel->gen == 5)
      len = 6;
   else
      len = 5;

   if (!depth_irb && !separate_stencil) {
      BEGIN_BATCH(len);
      OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
                (BRW_SURFACE_NULL << 29));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);

      if (intel->is_g4x || intel->gen >= 5)
         OUT_BATCH(0);

      if (intel->gen >= 6)
         OUT_BATCH(0);

      ADVANCE_BATCH();

   } else if (!depth_irb && separate_stencil) {
      /*
       * There exists a separate stencil buffer but no depth buffer.
       *
       * The stencil buffer inherits most of its fields from
       * 3DSTATE_DEPTH_BUFFER: namely the tile walk, surface type, width, and
       * height.
       *
       * Since the stencil buffer has quirky pitch requirements, its region
       * was allocated with half height and double cpp. So we need
       * a multiplier of 2 to obtain the surface's real height.
       *
       * Enable the hiz bit because it and the separate stencil bit must have
       * the same value. From Section 2.11.5.6.1.1 3DSTATE_DEPTH_BUFFER, Bit
       * 1.21 "Separate Stencil Enable":
       *     [DevIL]: If this field is enabled, Hierarchical Depth Buffer
       *     Enable must also be enabled.
       *
       *     [DevGT]: This field must be set to the same value (enabled or
       *     disabled) as Hierarchical Depth Buffer Enable
       *
       * The tiled bit must be set. From the Sandybridge PRM, Volume 2, Part 1,
       * Section 7.5.5.1.1 3DSTATE_DEPTH_BUFFER, Bit 1.27 Tiled Surface:
       *     [DevGT+]: This field must be set to TRUE.
       */
      assert(intel->has_separate_stencil);

      BEGIN_BATCH(len);
      OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
                (1 << 21) | /* separate stencil enable */
                (1 << 22) | /* hiz enable */
                (BRW_TILEWALK_YMAJOR << 26) |
                (1 << 27) | /* tiled surface */
                (BRW_SURFACE_2D << 29));
      OUT_BATCH(0);
      OUT_BATCH(((stencil_irb->Base.Width - 1) << 6) |
                (stencil_irb->Base.Height - 1) << 19);
      OUT_BATCH(0);
      OUT_BATCH(0);

      if (intel->gen >= 6)
         OUT_BATCH(0);

      ADVANCE_BATCH();

   } else {
      struct intel_region *region = depth_irb->mt->region;
      uint32_t tile_x, tile_y, offset;

      /* If using separate stencil, hiz must be enabled. */
      assert(!separate_stencil || hiz_region);

      offset = intel_renderbuffer_tile_offsets(depth_irb, &tile_x, &tile_y);

      assert(intel->gen < 6 || region->tiling == I915_TILING_Y);
      assert(!hiz_region || region->tiling == I915_TILING_Y);
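      /* intel_renderbuffer_tile_offsets() splits the renderbuffer's
       * position within the miptree into a tile-aligned byte offset
       * (programmed through the relocation below) and residual
       * intra-tile tile_x/tile_y pixel offsets.  The residuals go in
       * the depth coordinate offset fields on g4x+, and the surface
       * width/height are grown by them so the drawing rectangle still
       * covers the intended pixels.
       */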

      BEGIN_BATCH(len);
      OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH(((region->pitch * region->cpp) - 1) |
                (brw_depthbuffer_format(brw) << 18) |
                ((hiz_region ? 1 : 0) << 21) | /* separate stencil enable */
                ((hiz_region ? 1 : 0) << 22) | /* hiz enable */
                (BRW_TILEWALK_YMAJOR << 26) |
                ((region->tiling != I915_TILING_NONE) << 27) |
                (BRW_SURFACE_2D << 29));
      OUT_RELOC(region->bo,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                offset);
      OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1) |
                (((depth_irb->Base.Width + tile_x) - 1) << 6) |
                (((depth_irb->Base.Height + tile_y) - 1) << 19));
      OUT_BATCH(0);

      if (intel->is_g4x || intel->gen >= 5)
         OUT_BATCH(tile_x | (tile_y << 16));
      else
         assert(tile_x == 0 && tile_y == 0);

      if (intel->gen >= 6)
         OUT_BATCH(0);

      ADVANCE_BATCH();
   }

   if (hiz_region || separate_stencil) {
      /*
       * In the 3DSTATE_DEPTH_BUFFER batch emitted above, the 'separate
       * stencil enable' and 'hiz enable' bits were set. Therefore we must
       * emit 3DSTATE_HIER_DEPTH_BUFFER and 3DSTATE_STENCIL_BUFFER. Even if
       * there is no stencil buffer, 3DSTATE_STENCIL_BUFFER must be emitted;
       * failure to do so causes hangs on gen5 and a stall on gen6.
       */

      /* Emit hiz buffer. */
      if (hiz_region) {
         BEGIN_BATCH(3);
         OUT_BATCH((_3DSTATE_HIER_DEPTH_BUFFER << 16) | (3 - 2));
         OUT_BATCH(hiz_region->pitch * hiz_region->cpp - 1);
         OUT_RELOC(hiz_region->bo,
                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                   0);
         ADVANCE_BATCH();
      } else {
         BEGIN_BATCH(3);
         OUT_BATCH((_3DSTATE_HIER_DEPTH_BUFFER << 16) | (3 - 2));
         OUT_BATCH(0);
         OUT_BATCH(0);
         ADVANCE_BATCH();
      }

      /* Emit stencil buffer. */
      if (separate_stencil) {
         struct intel_region *region = stencil_mt->region;
         BEGIN_BATCH(3);
         OUT_BATCH((_3DSTATE_STENCIL_BUFFER << 16) | (3 - 2));
         OUT_BATCH(region->pitch * region->cpp - 1);
         OUT_RELOC(region->bo,
                   I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                   0);
         ADVANCE_BATCH();
      } else {
         BEGIN_BATCH(3);
         OUT_BATCH((_3DSTATE_STENCIL_BUFFER << 16) | (3 - 2));
         OUT_BATCH(0);
         OUT_BATCH(0);
         ADVANCE_BATCH();
      }
   }

   /*
    * On Gen >= 6, emit clear params for safety. If using hiz, then clear
    * params must be emitted.
    *
    * From Section 2.11.5.6.4.1 3DSTATE_CLEAR_PARAMS:
    *     3DSTATE_CLEAR_PARAMS packet must follow the DEPTH_BUFFER_STATE
    *     packet when HiZ is enabled and the DEPTH_BUFFER_STATE changes.
    */
   if (intel->gen >= 6 || hiz_region) {
      if (intel->gen == 6)
         intel_emit_post_sync_nonzero_flush(intel);

      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_CLEAR_PARAMS << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}

const struct brw_tracked_state brw_depthbuffer = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_BATCH,
      .cache = 0,
   },
   .emit = emit_depthbuffer,
};


/***********************************************************************
 * Polygon stipple packet
 */

static void upload_polygon_stipple(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &brw->intel.ctx;
   GLuint i;

   /* _NEW_POLYGON */
   if (!ctx->Polygon.StippleFlag)
      return;

   if (intel->gen == 6)
      intel_emit_post_sync_nonzero_flush(intel);

   BEGIN_BATCH(33);
   OUT_BATCH(_3DSTATE_POLY_STIPPLE_PATTERN << 16 | (33 - 2));

   /* Polygon stipple is provided in OpenGL order, i.e. bottom
    * row first. If we're rendering to a window (i.e. the
    * default frame buffer object, 0), then we need to invert
    * it to match our pixel layout. But if we're rendering
    * to a FBO (i.e. any named frame buffer object), we *don't*
    * need to invert - we already match the layout.
    */
   if (ctx->DrawBuffer->Name == 0) {
      for (i = 0; i < 32; i++)
         OUT_BATCH(ctx->PolygonStipple[31 - i]); /* invert */
   }
   else {
      for (i = 0; i < 32; i++)
         OUT_BATCH(ctx->PolygonStipple[i]);
   }
   CACHED_BATCH();
}

const struct brw_tracked_state brw_polygon_stipple = {
   .dirty = {
      .mesa = (_NEW_POLYGONSTIPPLE |
               _NEW_POLYGON),
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_polygon_stipple
};


/***********************************************************************
 * Polygon stipple offset packet
 */

static void upload_polygon_stipple_offset(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &brw->intel.ctx;

   /* _NEW_POLYGON */
   if (!ctx->Polygon.StippleFlag)
      return;

   if (intel->gen == 6)
      intel_emit_post_sync_nonzero_flush(intel);

   BEGIN_BATCH(2);
   OUT_BATCH(_3DSTATE_POLY_STIPPLE_OFFSET << 16 | (2 - 2));

   /* _NEW_BUFFERS
    *
    * If we're drawing to a system window (ctx->DrawBuffer->Name == 0),
    * we have to invert the Y axis in order to match the OpenGL
    * pixel coordinate system, and our offset must be matched
    * to the window position. If we're drawing to a FBO
    * (ctx->DrawBuffer->Name != 0), then our native pixel coordinate
    * system works just fine, and there's no window system to
    * worry about.
    */
   if (ctx->DrawBuffer->Name == 0)
      OUT_BATCH((32 - (ctx->DrawBuffer->Height & 31)) & 31);
   else
      OUT_BATCH(0);
   CACHED_BATCH();
}

const struct brw_tracked_state brw_polygon_stipple_offset = {
   .dirty = {
      .mesa = (_NEW_BUFFERS |
               _NEW_POLYGON),
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_polygon_stipple_offset
};

/**********************************************************************
 * AA Line parameters
 */
static void upload_aa_line_parameters(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &brw->intel.ctx;

   if (!ctx->Line.SmoothFlag || !brw->has_aa_line_parameters)
      return;

   if (intel->gen == 6)
      intel_emit_post_sync_nonzero_flush(intel);

   BEGIN_BATCH(3);
   OUT_BATCH(_3DSTATE_AA_LINE_PARAMETERS << 16 | (3 - 2));
   /* use legacy aa line coverage computation */
   OUT_BATCH(0);
   OUT_BATCH(0);
   CACHED_BATCH();
}

const struct brw_tracked_state brw_aa_line_parameters = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_aa_line_parameters
};

/***********************************************************************
 * Line stipple packet
 */

static void upload_line_stipple(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &brw->intel.ctx;
   GLfloat tmp;
   GLint tmpi;

   if (!ctx->Line.StippleFlag)
      return;

   if (intel->gen == 6)
      intel_emit_post_sync_nonzero_flush(intel);

   BEGIN_BATCH(3);
   OUT_BATCH(_3DSTATE_LINE_STIPPLE_PATTERN << 16 | (3 - 2));
   OUT_BATCH(ctx->Line.StipplePattern);
   tmp = 1.0 / (GLfloat) ctx->Line.StippleFactor;
   tmpi = tmp * (1<<13);
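   /* Worked example: StippleFactor = 3 gives tmpi = (1/3) * 8192,
    * truncated to 2730 (0x0aaa), so the DWord below is 0x0aaa0003:
    * the inverse repeat count in the high bits, the repeat count in
    * the low bits.
    */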
   OUT_BATCH(tmpi << 16 | ctx->Line.StippleFactor);
   CACHED_BATCH();
}

const struct brw_tracked_state brw_line_stipple = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_line_stipple
};


/***********************************************************************
 * Misc invariant state packets
 */

static void upload_invariant_state(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   /* 3DSTATE_SIP, 3DSTATE_MULTISAMPLE, etc. are nonpipelined. */
   if (intel->gen == 6)
      intel_emit_post_sync_nonzero_flush(intel);

   /* Select the 3D pipeline (as opposed to media) */
   BEGIN_BATCH(1);
   OUT_BATCH(brw->CMD_PIPELINE_SELECT << 16 | 0);
   ADVANCE_BATCH();

   if (intel->gen < 6) {
      /* Disable depth offset clamping. */
      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP << 16 | (2 - 2));
      OUT_BATCH_F(0.0);
      ADVANCE_BATCH();
   }

   if (intel->gen >= 6) {
      int i;
      int len = intel->gen >= 7 ? 4 : 3;

      BEGIN_BATCH(len);
      OUT_BATCH(_3DSTATE_MULTISAMPLE << 16 | (len - 2));
      OUT_BATCH(MS_PIXEL_LOCATION_CENTER |
                MS_NUMSAMPLES_1);
      OUT_BATCH(0); /* positions for 4/8-sample */
      if (intel->gen >= 7)
         OUT_BATCH(0);
      ADVANCE_BATCH();

      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_SAMPLE_MASK << 16 | (2 - 2));
      OUT_BATCH(1);
      ADVANCE_BATCH();

      if (intel->gen < 7) {
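         /* Initialize each of the four streamed-vertex-buffer index
          * registers with an index of 0 and a maximum index of
          * 0xffffffff.
          */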
         for (i = 0; i < 4; i++) {
            BEGIN_BATCH(4);
            OUT_BATCH(_3DSTATE_GS_SVB_INDEX << 16 | (4 - 2));
            OUT_BATCH(i << SVB_INDEX_SHIFT);
            OUT_BATCH(0);
            OUT_BATCH(0xffffffff);
            ADVANCE_BATCH();
         }
      }
   }

   BEGIN_BATCH(2);
   OUT_BATCH(CMD_STATE_SIP << 16 | (2 - 2));
   OUT_BATCH(0);
   ADVANCE_BATCH();

   BEGIN_BATCH(1);
   OUT_BATCH(brw->CMD_VF_STATISTICS << 16 |
             (unlikely(INTEL_DEBUG & DEBUG_STATS) ? 1 : 0));
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_invariant_state = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_invariant_state
};

/**
 * Define the base addresses which some state is referenced from.
 *
 * This allows us to avoid having to emit relocations for the objects,
 * and is actually required for binding table pointers on gen6.
 *
 * Surface state base address covers binding table pointers and
 * surface state objects, but not the surfaces that the surface state
 * objects point to.
 */
static void upload_state_base_address(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   /* FINISHME: According to section 3.6.1 "STATE_BASE_ADDRESS" of
    * vol1a of the G45 PRM, MI_FLUSH with the ISC invalidate should be
    * programmed prior to STATE_BASE_ADDRESS.
    *
    * However, given that the instruction SBA (general state base
    * address) on this chipset is always set to 0 across X and GL,
    * maybe this isn't required for us in particular.
    */

   if (intel->gen >= 6) {
      if (intel->gen == 6)
         intel_emit_post_sync_nonzero_flush(intel);

      BEGIN_BATCH(10);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
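      /* In each of the address and upper-bound DWords below, bit 0 is
       * that field's Modify Enable, so OUT_BATCH(1) programs an address
       * of zero with the modify bit set.
       */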
      /* General state base address: stateless DP read/write requests */
      OUT_BATCH(1);
      /* Surface state base address:
       * BINDING_TABLE_STATE
       * SURFACE_STATE
       */
      OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0, 1);
      /* Dynamic state base address:
       * SAMPLER_STATE
       * SAMPLER_BORDER_COLOR_STATE
       * CLIP, SF, WM/CC viewport state
       * COLOR_CALC_STATE
       * DEPTH_STENCIL_STATE
       * BLEND_STATE
       * Push constants (when INSTPM: CONSTANT_BUFFER Address Offset
       * Disable is clear, which we rely on)
       */
      OUT_RELOC(intel->batch.bo, (I915_GEM_DOMAIN_RENDER |
                                  I915_GEM_DOMAIN_INSTRUCTION), 0, 1);

      OUT_BATCH(1); /* Indirect object base address: MEDIA_OBJECT data */
      OUT_RELOC(brw->cache.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
                1); /* Instruction base address: shader kernels (incl. SIP) */

      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Dynamic state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else if (intel->gen == 5) {
      BEGIN_BATCH(8);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
                1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_RELOC(brw->cache.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
                1); /* Instruction base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(6);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
                1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      ADVANCE_BATCH();
   }

   /* According to section 3.6.1 of VOL1 of the 965 PRM,
    * STATE_BASE_ADDRESS updates require a reissue of:
    *
    * 3DSTATE_PIPELINE_POINTERS
    * 3DSTATE_BINDING_TABLE_POINTERS
    * MEDIA_STATE_POINTERS
    *
    * and this continues through Ironlake.  The Sandy Bridge PRM, vol
    * 1 part 1 says that the following packets must be reissued:
    *
    * 3DSTATE_CC_POINTERS
    * 3DSTATE_BINDING_TABLE_POINTERS
    * 3DSTATE_SAMPLER_STATE_POINTERS
    * 3DSTATE_VIEWPORT_STATE_POINTERS
    * MEDIA_STATE_POINTERS
    *
    * Those are always reissued following SBA updates anyway (new
    * batch time), except in the case of the program cache BO
    * changing.  Having a separate state flag makes the sequence more
    * obvious.
    */

   brw->state.dirty.brw |= BRW_NEW_STATE_BASE_ADDRESS;
}

const struct brw_tracked_state brw_state_base_address = {
   .dirty = {
      .mesa = 0,
      .brw = (BRW_NEW_BATCH |
              BRW_NEW_PROGRAM_CACHE),
      .cache = 0,
   },
   .emit = upload_state_base_address
};