2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **********************************************************************/
29 * Keith Whitwell <keith@tungstengraphics.com>
34 #include "intel_batchbuffer.h"
35 #include "intel_regions.h"
37 #include "brw_context.h"
38 #include "brw_state.h"
39 #include "brw_defines.h"
45 /***********************************************************************
49 static void upload_blend_constant_color(struct brw_context
*brw
)
51 GLcontext
*ctx
= &brw
->intel
.ctx
;
52 struct brw_blend_constant_color bcc
;
54 memset(&bcc
, 0, sizeof(bcc
));
55 bcc
.header
.opcode
= CMD_BLEND_CONSTANT_COLOR
;
56 bcc
.header
.length
= sizeof(bcc
)/4-2;
57 bcc
.blend_constant_color
[0] = ctx
->Color
.BlendColor
[0];
58 bcc
.blend_constant_color
[1] = ctx
->Color
.BlendColor
[1];
59 bcc
.blend_constant_color
[2] = ctx
->Color
.BlendColor
[2];
60 bcc
.blend_constant_color
[3] = ctx
->Color
.BlendColor
[3];
62 BRW_CACHED_BATCH_STRUCT(brw
, &bcc
);
66 const struct brw_tracked_state brw_blend_constant_color
= {
69 .brw
= BRW_NEW_CONTEXT
,
72 .emit
= upload_blend_constant_color
75 /* Constant single cliprect for framebuffer object or DRI2 drawing */
76 static void upload_drawing_rect(struct brw_context
*brw
)
78 struct intel_context
*intel
= &brw
->intel
;
79 GLcontext
*ctx
= &intel
->ctx
;
82 OUT_BATCH(_3DSTATE_DRAWRECT_INFO_I965
);
83 OUT_BATCH(0); /* xmin, ymin */
84 OUT_BATCH(((ctx
->DrawBuffer
->Width
- 1) & 0xffff) |
85 ((ctx
->DrawBuffer
->Height
- 1) << 16));
90 const struct brw_tracked_state brw_drawing_rect
= {
93 .brw
= BRW_NEW_CONTEXT
,
96 .emit
= upload_drawing_rect
100 * Upload the binding table pointers, which point each stage's array of surface
103 * The binding table pointers are relative to the surface state base address,
104 * which points at the batchbuffer containing the streamed batch state.
106 static void upload_binding_table_pointers(struct brw_context
*brw
)
108 struct intel_context
*intel
= &brw
->intel
;
111 OUT_BATCH(CMD_BINDING_TABLE_PTRS
<< 16 | (6 - 2));
112 OUT_BATCH(brw
->vs
.bind_bo_offset
);
113 OUT_BATCH(0); /* gs */
114 OUT_BATCH(0); /* clip */
115 OUT_BATCH(0); /* sf */
116 OUT_BATCH(brw
->wm
.bind_bo_offset
);
120 const struct brw_tracked_state brw_binding_table_pointers
= {
123 .brw
= BRW_NEW_BATCH
| BRW_NEW_BINDING_TABLE
,
126 .emit
= upload_binding_table_pointers
,
130 * Upload the binding table pointers, which point each stage's array of surface
133 * The binding table pointers are relative to the surface state base address,
134 * which points at the batchbuffer containing the streamed batch state.
136 static void upload_gen6_binding_table_pointers(struct brw_context
*brw
)
138 struct intel_context
*intel
= &brw
->intel
;
141 OUT_BATCH(CMD_BINDING_TABLE_PTRS
<< 16 |
142 GEN6_BINDING_TABLE_MODIFY_VS
|
143 GEN6_BINDING_TABLE_MODIFY_GS
|
144 GEN6_BINDING_TABLE_MODIFY_PS
|
146 OUT_BATCH(brw
->vs
.bind_bo_offset
); /* vs */
147 OUT_BATCH(0); /* gs */
148 OUT_BATCH(brw
->wm
.bind_bo_offset
); /* wm/ps */
152 const struct brw_tracked_state gen6_binding_table_pointers
= {
155 .brw
= BRW_NEW_BATCH
| BRW_NEW_BINDING_TABLE
,
158 .emit
= upload_gen6_binding_table_pointers
,
162 * Upload pointers to the per-stage state.
164 * The state pointers in this packet are all relative to the general state
165 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
167 static void upload_pipelined_state_pointers(struct brw_context
*brw
)
169 struct intel_context
*intel
= &brw
->intel
;
171 if (intel
->gen
== 5) {
172 /* Need to flush before changing clip max threads for errata. */
179 OUT_BATCH(CMD_PIPELINED_STATE_POINTERS
<< 16 | (7 - 2));
180 OUT_RELOC(brw
->vs
.state_bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0, 0);
181 if (brw
->gs
.prog_active
)
182 OUT_RELOC(brw
->gs
.state_bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0, 1);
185 OUT_RELOC(brw
->clip
.state_bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0, 1);
186 OUT_RELOC(brw
->sf
.state_bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0, 0);
187 OUT_RELOC(brw
->wm
.state_bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0, 0);
188 OUT_RELOC(brw
->cc
.state_bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0,
189 brw
->cc
.state_offset
);
192 brw
->state
.dirty
.brw
|= BRW_NEW_PSP
;
196 static void prepare_psp_urb_cbs(struct brw_context
*brw
)
198 brw_add_validated_bo(brw
, brw
->vs
.state_bo
);
199 brw_add_validated_bo(brw
, brw
->gs
.state_bo
);
200 brw_add_validated_bo(brw
, brw
->clip
.state_bo
);
201 brw_add_validated_bo(brw
, brw
->sf
.state_bo
);
202 brw_add_validated_bo(brw
, brw
->wm
.state_bo
);
/* Emit pipelined state pointers, the URB fence, and constant-buffer URB
 * state together — these three packets must stay consistent.
 */
static void upload_psp_urb_cbs(struct brw_context *brw)
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_cs_urb_state(brw);
}
212 const struct brw_tracked_state brw_psp_urb_cbs
= {
215 .brw
= BRW_NEW_URB_FENCE
| BRW_NEW_BATCH
,
216 .cache
= (CACHE_NEW_VS_UNIT
|
219 CACHE_NEW_CLIP_UNIT
|
224 .prepare
= prepare_psp_urb_cbs
,
225 .emit
= upload_psp_urb_cbs
,
228 static void prepare_depthbuffer(struct brw_context
*brw
)
230 struct intel_region
*region
= brw
->state
.depth_region
;
233 brw_add_validated_bo(brw
, region
->buffer
);
236 static void emit_depthbuffer(struct brw_context
*brw
)
238 struct intel_context
*intel
= &brw
->intel
;
239 struct intel_region
*region
= brw
->state
.depth_region
;
244 else if (intel
->is_g4x
|| intel
->gen
== 5)
249 if (region
== NULL
) {
251 OUT_BATCH(CMD_DEPTH_BUFFER
<< 16 | (len
- 2));
252 OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT
<< 18) |
253 (BRW_SURFACE_NULL
<< 29));
258 if (intel
->is_g4x
|| intel
->gen
>= 5)
268 switch (region
->cpp
) {
270 format
= BRW_DEPTHFORMAT_D16_UNORM
;
273 if (intel
->depth_buffer_is_float
)
274 format
= BRW_DEPTHFORMAT_D32_FLOAT
;
276 format
= BRW_DEPTHFORMAT_D24_UNORM_S8_UINT
;
283 assert(region
->tiling
!= I915_TILING_X
);
285 assert(region
->tiling
!= I915_TILING_NONE
);
288 OUT_BATCH(CMD_DEPTH_BUFFER
<< 16 | (len
- 2));
289 OUT_BATCH(((region
->pitch
* region
->cpp
) - 1) |
291 (BRW_TILEWALK_YMAJOR
<< 26) |
292 ((region
->tiling
!= I915_TILING_NONE
) << 27) |
293 (BRW_SURFACE_2D
<< 29));
294 OUT_RELOC(region
->buffer
,
295 I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
,
297 OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW
<< 1) |
298 ((region
->width
- 1) << 6) |
299 ((region
->height
- 1) << 19));
302 if (intel
->is_g4x
|| intel
->gen
>= 5)
311 /* Initialize it for safety. */
312 if (intel
->gen
>= 6) {
314 OUT_BATCH(CMD_3D_CLEAR_PARAMS
<< 16 | (2 - 2));
320 const struct brw_tracked_state brw_depthbuffer
= {
323 .brw
= BRW_NEW_DEPTH_BUFFER
| BRW_NEW_BATCH
,
326 .prepare
= prepare_depthbuffer
,
327 .emit
= emit_depthbuffer
,
332 /***********************************************************************
333 * Polygon stipple packet
336 static void upload_polygon_stipple(struct brw_context
*brw
)
338 GLcontext
*ctx
= &brw
->intel
.ctx
;
339 struct brw_polygon_stipple bps
;
342 memset(&bps
, 0, sizeof(bps
));
343 bps
.header
.opcode
= CMD_POLY_STIPPLE_PATTERN
;
344 bps
.header
.length
= sizeof(bps
)/4-2;
346 /* Polygon stipple is provided in OpenGL order, i.e. bottom
347 * row first. If we're rendering to a window (i.e. the
348 * default frame buffer object, 0), then we need to invert
349 * it to match our pixel layout. But if we're rendering
350 * to a FBO (i.e. any named frame buffer object), we *don't*
351 * need to invert - we already match the layout.
353 if (ctx
->DrawBuffer
->Name
== 0) {
354 for (i
= 0; i
< 32; i
++)
355 bps
.stipple
[i
] = ctx
->PolygonStipple
[31 - i
]; /* invert */
358 for (i
= 0; i
< 32; i
++)
359 bps
.stipple
[i
] = ctx
->PolygonStipple
[i
]; /* don't invert */
362 BRW_CACHED_BATCH_STRUCT(brw
, &bps
);
365 const struct brw_tracked_state brw_polygon_stipple
= {
367 .mesa
= _NEW_POLYGONSTIPPLE
,
368 .brw
= BRW_NEW_CONTEXT
,
371 .emit
= upload_polygon_stipple
375 /***********************************************************************
376 * Polygon stipple offset packet
379 static void upload_polygon_stipple_offset(struct brw_context
*brw
)
381 GLcontext
*ctx
= &brw
->intel
.ctx
;
382 struct brw_polygon_stipple_offset bpso
;
384 memset(&bpso
, 0, sizeof(bpso
));
385 bpso
.header
.opcode
= CMD_POLY_STIPPLE_OFFSET
;
386 bpso
.header
.length
= sizeof(bpso
)/4-2;
388 /* If we're drawing to a system window (ctx->DrawBuffer->Name == 0),
389 * we have to invert the Y axis in order to match the OpenGL
390 * pixel coordinate system, and our offset must be matched
391 * to the window position. If we're drawing to a FBO
392 * (ctx->DrawBuffer->Name != 0), then our native pixel coordinate
393 * system works just fine, and there's no window system to
396 if (brw
->intel
.ctx
.DrawBuffer
->Name
== 0) {
397 bpso
.bits0
.x_offset
= 0;
398 bpso
.bits0
.y_offset
= (32 - (ctx
->DrawBuffer
->Height
& 31)) & 31;
401 bpso
.bits0
.y_offset
= 0;
402 bpso
.bits0
.x_offset
= 0;
405 BRW_CACHED_BATCH_STRUCT(brw
, &bpso
);
408 #define _NEW_WINDOW_POS 0x40000000
410 const struct brw_tracked_state brw_polygon_stipple_offset
= {
412 .mesa
= _NEW_WINDOW_POS
,
413 .brw
= BRW_NEW_CONTEXT
,
416 .emit
= upload_polygon_stipple_offset
419 /**********************************************************************
422 static void upload_aa_line_parameters(struct brw_context
*brw
)
424 struct brw_aa_line_parameters balp
;
426 if (!brw
->has_aa_line_parameters
)
429 /* use legacy aa line coverage computation */
430 memset(&balp
, 0, sizeof(balp
));
431 balp
.header
.opcode
= CMD_AA_LINE_PARAMETERS
;
432 balp
.header
.length
= sizeof(balp
) / 4 - 2;
434 BRW_CACHED_BATCH_STRUCT(brw
, &balp
);
437 const struct brw_tracked_state brw_aa_line_parameters
= {
440 .brw
= BRW_NEW_CONTEXT
,
443 .emit
= upload_aa_line_parameters
446 /***********************************************************************
447 * Line stipple packet
450 static void upload_line_stipple(struct brw_context
*brw
)
452 GLcontext
*ctx
= &brw
->intel
.ctx
;
453 struct brw_line_stipple bls
;
457 memset(&bls
, 0, sizeof(bls
));
458 bls
.header
.opcode
= CMD_LINE_STIPPLE_PATTERN
;
459 bls
.header
.length
= sizeof(bls
)/4 - 2;
461 bls
.bits0
.pattern
= ctx
->Line
.StipplePattern
;
462 bls
.bits1
.repeat_count
= ctx
->Line
.StippleFactor
;
464 tmp
= 1.0 / (GLfloat
) ctx
->Line
.StippleFactor
;
465 tmpi
= tmp
* (1<<13);
468 bls
.bits1
.inverse_repeat_count
= tmpi
;
470 BRW_CACHED_BATCH_STRUCT(brw
, &bls
);
473 const struct brw_tracked_state brw_line_stipple
= {
476 .brw
= BRW_NEW_CONTEXT
,
479 .emit
= upload_line_stipple
483 /***********************************************************************
484 * Misc invarient state packets
487 static void upload_invarient_state( struct brw_context
*brw
)
489 struct intel_context
*intel
= &brw
->intel
;
492 /* 0x61040000 Pipeline Select */
493 /* PipelineSelect : 0 */
494 struct brw_pipeline_select ps
;
496 memset(&ps
, 0, sizeof(ps
));
497 ps
.header
.opcode
= brw
->CMD_PIPELINE_SELECT
;
498 ps
.header
.pipeline_select
= 0;
499 BRW_BATCH_STRUCT(brw
, &ps
);
502 if (intel
->gen
< 6) {
503 struct brw_global_depth_offset_clamp gdo
;
504 memset(&gdo
, 0, sizeof(gdo
));
506 /* Disable depth offset clamping.
508 gdo
.header
.opcode
= CMD_GLOBAL_DEPTH_OFFSET_CLAMP
;
509 gdo
.header
.length
= sizeof(gdo
)/4 - 2;
510 gdo
.depth_offset_clamp
= 0.0;
512 BRW_BATCH_STRUCT(brw
, &gdo
);
515 if (intel
->gen
>= 6) {
518 intel_batchbuffer_emit_mi_flush(intel
->batch
);
521 OUT_BATCH(CMD_3D_MULTISAMPLE
<< 16 | (3 - 2));
522 OUT_BATCH(MS_PIXEL_LOCATION_CENTER
|
524 OUT_BATCH(0); /* positions for 4/8-sample */
528 OUT_BATCH(CMD_3D_SAMPLE_MASK
<< 16 | (2 - 2));
532 for (i
= 0; i
< 4; i
++) {
534 OUT_BATCH(CMD_GS_SVB_INDEX
<< 16 | (4 - 2));
535 OUT_BATCH(i
<< SVB_INDEX_SHIFT
);
537 OUT_BATCH(0xffffffff);
542 /* 0x61020000 State Instruction Pointer */
544 struct brw_system_instruction_pointer sip
;
545 memset(&sip
, 0, sizeof(sip
));
547 sip
.header
.opcode
= CMD_STATE_INSN_POINTER
;
548 sip
.header
.length
= 0;
550 sip
.bits0
.system_instruction_pointer
= 0;
551 BRW_BATCH_STRUCT(brw
, &sip
);
556 struct brw_vf_statistics vfs
;
557 memset(&vfs
, 0, sizeof(vfs
));
559 vfs
.opcode
= brw
->CMD_VF_STATISTICS
;
560 if (INTEL_DEBUG
& DEBUG_STATS
)
561 vfs
.statistics_enable
= 1;
563 BRW_BATCH_STRUCT(brw
, &vfs
);
567 const struct brw_tracked_state brw_invarient_state
= {
570 .brw
= BRW_NEW_CONTEXT
,
573 .emit
= upload_invarient_state
577 * Define the base addresses which some state is referenced from.
579 * This allows us to avoid having to emit relocations for the objects,
580 * and is actually required for binding table pointers on gen6.
582 * Surface state base address covers binding table pointers and
583 * surface state objects, but not the surfaces that the surface state
586 static void upload_state_base_address( struct brw_context
*brw
)
588 struct intel_context
*intel
= &brw
->intel
;
590 if (intel
->gen
>= 6) {
592 OUT_BATCH(CMD_STATE_BASE_ADDRESS
<< 16 | (10 - 2));
593 OUT_BATCH(1); /* General state base address */
594 OUT_RELOC(intel
->batch
->buf
, I915_GEM_DOMAIN_SAMPLER
, 0,
595 1); /* Surface state base address */
596 OUT_BATCH(1); /* Dynamic state base address */
597 OUT_BATCH(1); /* Indirect object base address */
598 OUT_BATCH(1); /* Instruction base address */
599 OUT_BATCH(1); /* General state upper bound */
600 OUT_BATCH(1); /* Dynamic state upper bound */
601 OUT_BATCH(1); /* Indirect object upper bound */
602 OUT_BATCH(1); /* Instruction access upper bound */
604 } else if (intel
->gen
== 5) {
606 OUT_BATCH(CMD_STATE_BASE_ADDRESS
<< 16 | (8 - 2));
607 OUT_BATCH(1); /* General state base address */
608 OUT_RELOC(intel
->batch
->buf
, I915_GEM_DOMAIN_SAMPLER
, 0,
609 1); /* Surface state base address */
610 OUT_BATCH(1); /* Indirect object base address */
611 OUT_BATCH(1); /* Instruction base address */
612 OUT_BATCH(1); /* General state upper bound */
613 OUT_BATCH(1); /* Indirect object upper bound */
614 OUT_BATCH(1); /* Instruction access upper bound */
618 OUT_BATCH(CMD_STATE_BASE_ADDRESS
<< 16 | (6 - 2));
619 OUT_BATCH(1); /* General state base address */
620 OUT_RELOC(intel
->batch
->buf
, I915_GEM_DOMAIN_SAMPLER
, 0,
621 1); /* Surface state base address */
622 OUT_BATCH(1); /* Indirect object base address */
623 OUT_BATCH(1); /* General state upper bound */
624 OUT_BATCH(1); /* Indirect object upper bound */
629 const struct brw_tracked_state brw_state_base_address
= {
632 .brw
= BRW_NEW_BATCH
,
635 .emit
= upload_state_base_address