2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **********************************************************************/
29 * Keith Whitwell <keith@tungstengraphics.com>
34 #include "intel_batchbuffer.h"
35 #include "intel_fbo.h"
36 #include "intel_regions.h"
38 #include "brw_context.h"
39 #include "brw_state.h"
40 #include "brw_defines.h"
42 /* Constant single cliprect for framebuffer object or DRI2 drawing */
43 static void upload_drawing_rect(struct brw_context
*brw
)
45 struct intel_context
*intel
= &brw
->intel
;
46 struct gl_context
*ctx
= &intel
->ctx
;
49 OUT_BATCH(_3DSTATE_DRAWRECT_INFO_I965
);
50 OUT_BATCH(0); /* xmin, ymin */
51 OUT_BATCH(((ctx
->DrawBuffer
->Width
- 1) & 0xffff) |
52 ((ctx
->DrawBuffer
->Height
- 1) << 16));
57 const struct brw_tracked_state brw_drawing_rect
= {
60 .brw
= BRW_NEW_CONTEXT
,
63 .emit
= upload_drawing_rect
67 * Upload the binding table pointers, which point each stage's array of surface
70 * The binding table pointers are relative to the surface state base address,
71 * which points at the batchbuffer containing the streamed batch state.
73 static void upload_binding_table_pointers(struct brw_context
*brw
)
75 struct intel_context
*intel
= &brw
->intel
;
78 OUT_BATCH(_3DSTATE_BINDING_TABLE_POINTERS
<< 16 | (6 - 2));
79 OUT_BATCH(brw
->vs
.bind_bo_offset
);
80 OUT_BATCH(0); /* gs */
81 OUT_BATCH(0); /* clip */
82 OUT_BATCH(0); /* sf */
83 OUT_BATCH(brw
->wm
.bind_bo_offset
);
87 const struct brw_tracked_state brw_binding_table_pointers
= {
91 | BRW_NEW_VS_BINDING_TABLE
92 | BRW_NEW_GS_BINDING_TABLE
93 | BRW_NEW_PS_BINDING_TABLE
,
96 .emit
= upload_binding_table_pointers
,
100 * Upload the binding table pointers, which point each stage's array of surface
103 * The binding table pointers are relative to the surface state base address,
104 * which points at the batchbuffer containing the streamed batch state.
106 static void upload_gen6_binding_table_pointers(struct brw_context
*brw
)
108 struct intel_context
*intel
= &brw
->intel
;
111 OUT_BATCH(_3DSTATE_BINDING_TABLE_POINTERS
<< 16 |
112 GEN6_BINDING_TABLE_MODIFY_VS
|
113 GEN6_BINDING_TABLE_MODIFY_GS
|
114 GEN6_BINDING_TABLE_MODIFY_PS
|
116 OUT_BATCH(brw
->vs
.bind_bo_offset
); /* vs */
117 OUT_BATCH(0); /* gs */
118 OUT_BATCH(brw
->wm
.bind_bo_offset
); /* wm/ps */
122 const struct brw_tracked_state gen6_binding_table_pointers
= {
126 | BRW_NEW_VS_BINDING_TABLE
127 | BRW_NEW_GS_BINDING_TABLE
128 | BRW_NEW_PS_BINDING_TABLE
,
131 .emit
= upload_gen6_binding_table_pointers
,
135 * Upload pointers to the per-stage state.
137 * The state pointers in this packet are all relative to the general state
138 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
140 static void upload_pipelined_state_pointers(struct brw_context
*brw
)
142 struct intel_context
*intel
= &brw
->intel
;
144 if (intel
->gen
== 5) {
145 /* Need to flush before changing clip max threads for errata. */
152 OUT_BATCH(_3DSTATE_PIPELINED_POINTERS
<< 16 | (7 - 2));
153 OUT_RELOC(intel
->batch
.bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0,
154 brw
->vs
.state_offset
);
155 if (brw
->gs
.prog_active
)
156 OUT_RELOC(brw
->intel
.batch
.bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0,
157 brw
->gs
.state_offset
| 1);
160 OUT_RELOC(brw
->intel
.batch
.bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0,
161 brw
->clip
.state_offset
| 1);
162 OUT_RELOC(brw
->intel
.batch
.bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0,
163 brw
->sf
.state_offset
);
164 OUT_RELOC(brw
->intel
.batch
.bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0,
165 brw
->wm
.state_offset
);
166 OUT_RELOC(brw
->intel
.batch
.bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0,
167 brw
->cc
.state_offset
);
170 brw
->state
.dirty
.brw
|= BRW_NEW_PSP
;
/* Emit the pipelined state pointers together with the URB fence and CS URB
 * state, since the three must be kept consistent with each other.
 */
static void upload_psp_urb_cbs(struct brw_context *brw)
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_cs_urb_state(brw);
}
180 const struct brw_tracked_state brw_psp_urb_cbs
= {
183 .brw
= BRW_NEW_URB_FENCE
| BRW_NEW_BATCH
,
184 .cache
= (CACHE_NEW_VS_UNIT
|
187 CACHE_NEW_CLIP_UNIT
|
192 .emit
= upload_psp_urb_cbs
,
195 static void prepare_depthbuffer(struct brw_context
*brw
)
197 struct intel_context
*intel
= &brw
->intel
;
198 struct gl_context
*ctx
= &intel
->ctx
;
199 struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
200 struct intel_renderbuffer
*drb
= intel_get_renderbuffer(fb
, BUFFER_DEPTH
);
201 struct intel_renderbuffer
*srb
= intel_get_renderbuffer(fb
, BUFFER_STENCIL
);
204 brw_add_validated_bo(brw
, drb
->region
->buffer
);
205 if (drb
&& drb
->hiz_region
)
206 brw_add_validated_bo(brw
, drb
->hiz_region
->buffer
);
208 brw_add_validated_bo(brw
, srb
->region
->buffer
);
211 static void emit_depthbuffer(struct brw_context
*brw
)
213 struct intel_context
*intel
= &brw
->intel
;
214 struct gl_context
*ctx
= &intel
->ctx
;
215 struct gl_framebuffer
*fb
= ctx
->DrawBuffer
;
217 struct intel_renderbuffer
*depth_irb
= intel_get_renderbuffer(fb
, BUFFER_DEPTH
);
218 struct intel_renderbuffer
*stencil_irb
= intel_get_renderbuffer(fb
, BUFFER_STENCIL
);
219 struct intel_region
*hiz_region
= depth_irb
? depth_irb
->hiz_region
: NULL
;
223 * If either depth or stencil buffer has packed depth/stencil format,
224 * then don't use separate stencil. Emit only a depth buffer.
226 if (depth_irb
&& depth_irb
->Base
.Format
== MESA_FORMAT_S8_Z24
) {
228 } else if (!depth_irb
&& stencil_irb
229 && stencil_irb
->Base
.Format
== MESA_FORMAT_S8_Z24
) {
230 depth_irb
= stencil_irb
;
236 else if (intel
->is_g4x
|| intel
->gen
== 5)
241 if (!depth_irb
&& !stencil_irb
) {
243 OUT_BATCH(_3DSTATE_DEPTH_BUFFER
<< 16 | (len
- 2));
244 OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT
<< 18) |
245 (BRW_SURFACE_NULL
<< 29));
250 if (intel
->is_g4x
|| intel
->gen
>= 5)
258 } else if (!depth_irb
&& stencil_irb
) {
260 * There exists a separate stencil buffer but no depth buffer.
262 * The stencil buffer inherits most of its fields from
263 * 3DSTATE_DEPTH_BUFFER: namely the tile walk, surface type, width, and
266 * Since the stencil buffer has quirky pitch requirements, its region
267 * was allocated with half height and double cpp. So we need
268 * a multiplier of 2 to obtain the surface's real height.
270 * Enable the hiz bit because it and the separate stencil bit must have
271 * the same value. From Section 2.11.5.6.1.1 3DSTATE_DEPTH_BUFFER, Bit
272 * 1.21 "Separate Stencil Enable":
273 * [DevIL]: If this field is enabled, Hierarchical Depth Buffer
274 * Enable must also be enabled.
276 * [DevGT]: This field must be set to the same value (enabled or
277 * disabled) as Hierarchical Depth Buffer Enable
279 assert(intel
->has_separate_stencil
);
280 assert(stencil_irb
->Base
.Format
== MESA_FORMAT_S8
);
283 OUT_BATCH(_3DSTATE_DEPTH_BUFFER
<< 16 | (len
- 2));
284 OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT
<< 18) |
285 (1 << 21) | /* separate stencil enable */
286 (1 << 22) | /* hiz enable */
287 (BRW_TILEWALK_YMAJOR
<< 26) |
288 (BRW_SURFACE_2D
<< 29));
290 OUT_BATCH(((stencil_irb
->region
->width
- 1) << 6) |
291 (2 * stencil_irb
->region
->height
- 1) << 19);
301 struct intel_region
*region
= depth_irb
->region
;
303 uint32_t tile_x
, tile_y
, offset
;
305 /* If using separate stencil, hiz must be enabled. */
306 assert(!stencil_irb
|| hiz_region
);
308 switch (region
->cpp
) {
310 format
= BRW_DEPTHFORMAT_D16_UNORM
;
313 if (intel
->depth_buffer_is_float
)
314 format
= BRW_DEPTHFORMAT_D32_FLOAT
;
316 format
= BRW_DEPTHFORMAT_D24_UNORM_X8_UINT
;
318 format
= BRW_DEPTHFORMAT_D24_UNORM_S8_UINT
;
325 offset
= intel_region_tile_offsets(region
, &tile_x
, &tile_y
);
327 assert(intel
->gen
< 6 || region
->tiling
== I915_TILING_Y
);
328 assert(!hiz_region
|| region
->tiling
== I915_TILING_Y
);
331 OUT_BATCH(_3DSTATE_DEPTH_BUFFER
<< 16 | (len
- 2));
332 OUT_BATCH(((region
->pitch
* region
->cpp
) - 1) |
334 ((hiz_region
? 1 : 0) << 21) | /* separate stencil enable */
335 ((hiz_region
? 1 : 0) << 22) | /* hiz enable */
336 (BRW_TILEWALK_YMAJOR
<< 26) |
337 ((region
->tiling
!= I915_TILING_NONE
) << 27) |
338 (BRW_SURFACE_2D
<< 29));
339 OUT_RELOC(region
->buffer
,
340 I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
,
342 OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW
<< 1) |
343 ((region
->width
- 1) << 6) |
344 ((region
->height
- 1) << 19));
347 if (intel
->is_g4x
|| intel
->gen
>= 5)
348 OUT_BATCH(tile_x
| (tile_y
<< 16));
350 assert(tile_x
== 0 && tile_y
== 0);
358 /* Emit hiz buffer. */
359 if (hiz_region
|| stencil_irb
) {
361 OUT_BATCH((_3DSTATE_HIER_DEPTH_BUFFER
<< 16) | (3 - 2));
362 OUT_BATCH(hiz_region
->pitch
* hiz_region
->cpp
- 1);
363 OUT_RELOC(hiz_region
->buffer
,
364 I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
,
369 /* Emit stencil buffer. */
370 if (hiz_region
|| stencil_irb
) {
372 OUT_BATCH((_3DSTATE_STENCIL_BUFFER
<< 16) | (3 - 2));
373 OUT_BATCH(stencil_irb
->region
->pitch
* stencil_irb
->region
->cpp
- 1);
374 OUT_RELOC(stencil_irb
->region
->buffer
,
375 I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
,
381 * On Gen >= 6, emit clear params for safety. If using hiz, then clear
382 * params must be emitted.
384 * From Section 2.11.5.6.4.1 3DSTATE_CLEAR_PARAMS:
385 * 3DSTATE_CLEAR_PARAMS packet must follow the DEPTH_BUFFER_STATE packet
386 * when HiZ is enabled and the DEPTH_BUFFER_STATE changes.
388 if (intel
->gen
>= 6 || hiz_region
) {
390 OUT_BATCH(_3DSTATE_CLEAR_PARAMS
<< 16 | (2 - 2));
396 const struct brw_tracked_state brw_depthbuffer
= {
398 .mesa
= _NEW_BUFFERS
,
399 .brw
= BRW_NEW_BATCH
,
402 .prepare
= prepare_depthbuffer
,
403 .emit
= emit_depthbuffer
,
408 /***********************************************************************
409 * Polygon stipple packet
412 static void upload_polygon_stipple(struct brw_context
*brw
)
414 struct intel_context
*intel
= &brw
->intel
;
415 struct gl_context
*ctx
= &brw
->intel
.ctx
;
418 if (!ctx
->Polygon
.StippleFlag
)
422 OUT_BATCH(_3DSTATE_POLY_STIPPLE_PATTERN
<< 16 | (33 - 2));
424 /* Polygon stipple is provided in OpenGL order, i.e. bottom
425 * row first. If we're rendering to a window (i.e. the
426 * default frame buffer object, 0), then we need to invert
427 * it to match our pixel layout. But if we're rendering
428 * to a FBO (i.e. any named frame buffer object), we *don't*
429 * need to invert - we already match the layout.
431 if (ctx
->DrawBuffer
->Name
== 0) {
432 for (i
= 0; i
< 32; i
++)
433 OUT_BATCH(ctx
->PolygonStipple
[31 - i
]); /* invert */
436 for (i
= 0; i
< 32; i
++)
437 OUT_BATCH(ctx
->PolygonStipple
[i
]);
442 const struct brw_tracked_state brw_polygon_stipple
= {
444 .mesa
= _NEW_POLYGONSTIPPLE
,
445 .brw
= BRW_NEW_CONTEXT
,
448 .emit
= upload_polygon_stipple
452 /***********************************************************************
453 * Polygon stipple offset packet
456 static void upload_polygon_stipple_offset(struct brw_context
*brw
)
458 struct intel_context
*intel
= &brw
->intel
;
459 struct gl_context
*ctx
= &brw
->intel
.ctx
;
461 if (!ctx
->Polygon
.StippleFlag
)
465 OUT_BATCH(_3DSTATE_POLY_STIPPLE_OFFSET
<< 16 | (2-2));
467 /* If we're drawing to a system window (ctx->DrawBuffer->Name == 0),
468 * we have to invert the Y axis in order to match the OpenGL
469 * pixel coordinate system, and our offset must be matched
470 * to the window position. If we're drawing to a FBO
471 * (ctx->DrawBuffer->Name != 0), then our native pixel coordinate
472 * system works just fine, and there's no window system to
475 if (brw
->intel
.ctx
.DrawBuffer
->Name
== 0)
476 OUT_BATCH((32 - (ctx
->DrawBuffer
->Height
& 31)) & 31);
482 #define _NEW_WINDOW_POS 0x40000000
484 const struct brw_tracked_state brw_polygon_stipple_offset
= {
486 .mesa
= _NEW_WINDOW_POS
| _NEW_POLYGONSTIPPLE
,
487 .brw
= BRW_NEW_CONTEXT
,
490 .emit
= upload_polygon_stipple_offset
493 /**********************************************************************
496 static void upload_aa_line_parameters(struct brw_context
*brw
)
498 struct intel_context
*intel
= &brw
->intel
;
499 struct gl_context
*ctx
= &brw
->intel
.ctx
;
501 if (!ctx
->Line
.SmoothFlag
|| !brw
->has_aa_line_parameters
)
504 OUT_BATCH(_3DSTATE_AA_LINE_PARAMETERS
<< 16 | (3 - 2));
505 /* use legacy aa line coverage computation */
511 const struct brw_tracked_state brw_aa_line_parameters
= {
514 .brw
= BRW_NEW_CONTEXT
,
517 .emit
= upload_aa_line_parameters
520 /***********************************************************************
521 * Line stipple packet
524 static void upload_line_stipple(struct brw_context
*brw
)
526 struct intel_context
*intel
= &brw
->intel
;
527 struct gl_context
*ctx
= &brw
->intel
.ctx
;
531 if (!ctx
->Line
.StippleFlag
)
535 OUT_BATCH(_3DSTATE_LINE_STIPPLE_PATTERN
<< 16 | (3 - 2));
536 OUT_BATCH(ctx
->Line
.StipplePattern
);
537 tmp
= 1.0 / (GLfloat
) ctx
->Line
.StippleFactor
;
538 tmpi
= tmp
* (1<<13);
539 OUT_BATCH(tmpi
<< 16 | ctx
->Line
.StippleFactor
);
543 const struct brw_tracked_state brw_line_stipple
= {
546 .brw
= BRW_NEW_CONTEXT
,
549 .emit
= upload_line_stipple
553 /***********************************************************************
554 * Misc invarient state packets
557 static void upload_invarient_state( struct brw_context
*brw
)
559 struct intel_context
*intel
= &brw
->intel
;
562 /* 0x61040000 Pipeline Select */
563 /* PipelineSelect : 0 */
564 struct brw_pipeline_select ps
;
566 memset(&ps
, 0, sizeof(ps
));
567 ps
.header
.opcode
= brw
->CMD_PIPELINE_SELECT
;
568 ps
.header
.pipeline_select
= 0;
569 BRW_BATCH_STRUCT(brw
, &ps
);
572 if (intel
->gen
< 6) {
573 struct brw_global_depth_offset_clamp gdo
;
574 memset(&gdo
, 0, sizeof(gdo
));
576 /* Disable depth offset clamping.
578 gdo
.header
.opcode
= _3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP
;
579 gdo
.header
.length
= sizeof(gdo
)/4 - 2;
580 gdo
.depth_offset_clamp
= 0.0;
582 BRW_BATCH_STRUCT(brw
, &gdo
);
585 if (intel
->gen
>= 6) {
587 int len
= intel
->gen
>= 7 ? 4 : 3;
590 OUT_BATCH(_3DSTATE_MULTISAMPLE
<< 16 | (len
- 2));
591 OUT_BATCH(MS_PIXEL_LOCATION_CENTER
|
593 OUT_BATCH(0); /* positions for 4/8-sample */
599 OUT_BATCH(_3DSTATE_SAMPLE_MASK
<< 16 | (2 - 2));
603 if (intel
->gen
< 7) {
604 for (i
= 0; i
< 4; i
++) {
606 OUT_BATCH(_3DSTATE_GS_SVB_INDEX
<< 16 | (4 - 2));
607 OUT_BATCH(i
<< SVB_INDEX_SHIFT
);
609 OUT_BATCH(0xffffffff);
615 /* 0x61020000 State Instruction Pointer */
617 struct brw_system_instruction_pointer sip
;
618 memset(&sip
, 0, sizeof(sip
));
620 sip
.header
.opcode
= CMD_STATE_INSN_POINTER
;
621 sip
.header
.length
= 0;
623 sip
.bits0
.system_instruction_pointer
= 0;
624 BRW_BATCH_STRUCT(brw
, &sip
);
629 struct brw_vf_statistics vfs
;
630 memset(&vfs
, 0, sizeof(vfs
));
632 vfs
.opcode
= brw
->CMD_VF_STATISTICS
;
633 if (unlikely(INTEL_DEBUG
& DEBUG_STATS
))
634 vfs
.statistics_enable
= 1;
636 BRW_BATCH_STRUCT(brw
, &vfs
);
640 const struct brw_tracked_state brw_invarient_state
= {
643 .brw
= BRW_NEW_CONTEXT
,
646 .emit
= upload_invarient_state
650 * Define the base addresses which some state is referenced from.
652 * This allows us to avoid having to emit relocations for the objects,
653 * and is actually required for binding table pointers on gen6.
655 * Surface state base address covers binding table pointers and
656 * surface state objects, but not the surfaces that the surface state
659 static void upload_state_base_address( struct brw_context
*brw
)
661 struct intel_context
*intel
= &brw
->intel
;
663 if (intel
->gen
>= 6) {
665 OUT_BATCH(CMD_STATE_BASE_ADDRESS
<< 16 | (10 - 2));
666 /* General state base address: stateless DP read/write requests */
668 /* Surface state base address:
669 * BINDING_TABLE_STATE
672 OUT_RELOC(intel
->batch
.bo
, I915_GEM_DOMAIN_SAMPLER
, 0, 1);
673 /* Dynamic state base address:
675 * SAMPLER_BORDER_COLOR_STATE
676 * CLIP, SF, WM/CC viewport state
678 * DEPTH_STENCIL_STATE
680 * Push constants (when INSTPM: CONSTANT_BUFFER Address Offset
681 * Disable is clear, which we rely on)
683 OUT_RELOC(intel
->batch
.bo
, (I915_GEM_DOMAIN_RENDER
|
684 I915_GEM_DOMAIN_INSTRUCTION
), 0, 1);
686 OUT_BATCH(1); /* Indirect object base address: MEDIA_OBJECT data */
687 OUT_BATCH(1); /* Instruction base address: shader kernels (incl. SIP) */
688 OUT_BATCH(1); /* General state upper bound */
689 OUT_BATCH(1); /* Dynamic state upper bound */
690 OUT_BATCH(1); /* Indirect object upper bound */
691 OUT_BATCH(1); /* Instruction access upper bound */
693 } else if (intel
->gen
== 5) {
695 OUT_BATCH(CMD_STATE_BASE_ADDRESS
<< 16 | (8 - 2));
696 OUT_BATCH(1); /* General state base address */
697 OUT_RELOC(intel
->batch
.bo
, I915_GEM_DOMAIN_SAMPLER
, 0,
698 1); /* Surface state base address */
699 OUT_BATCH(1); /* Indirect object base address */
700 OUT_BATCH(1); /* Instruction base address */
701 OUT_BATCH(1); /* General state upper bound */
702 OUT_BATCH(1); /* Indirect object upper bound */
703 OUT_BATCH(1); /* Instruction access upper bound */
707 OUT_BATCH(CMD_STATE_BASE_ADDRESS
<< 16 | (6 - 2));
708 OUT_BATCH(1); /* General state base address */
709 OUT_RELOC(intel
->batch
.bo
, I915_GEM_DOMAIN_SAMPLER
, 0,
710 1); /* Surface state base address */
711 OUT_BATCH(1); /* Indirect object base address */
712 OUT_BATCH(1); /* General state upper bound */
713 OUT_BATCH(1); /* Indirect object upper bound */
718 const struct brw_tracked_state brw_state_base_address
= {
721 .brw
= BRW_NEW_BATCH
,
724 .emit
= upload_state_base_address