2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **********************************************************************/
29 * Keith Whitwell <keith@tungstengraphics.com>
34 #include "intel_batchbuffer.h"
35 #include "intel_regions.h"
37 #include "brw_context.h"
38 #include "brw_state.h"
39 #include "brw_defines.h"
45 /***********************************************************************
49 static void upload_blend_constant_color(struct brw_context
*brw
)
51 GLcontext
*ctx
= &brw
->intel
.ctx
;
52 struct brw_blend_constant_color bcc
;
54 memset(&bcc
, 0, sizeof(bcc
));
55 bcc
.header
.opcode
= CMD_BLEND_CONSTANT_COLOR
;
56 bcc
.header
.length
= sizeof(bcc
)/4-2;
57 bcc
.blend_constant_color
[0] = ctx
->Color
.BlendColor
[0];
58 bcc
.blend_constant_color
[1] = ctx
->Color
.BlendColor
[1];
59 bcc
.blend_constant_color
[2] = ctx
->Color
.BlendColor
[2];
60 bcc
.blend_constant_color
[3] = ctx
->Color
.BlendColor
[3];
62 BRW_CACHED_BATCH_STRUCT(brw
, &bcc
);
66 const struct brw_tracked_state brw_blend_constant_color
= {
69 .brw
= BRW_NEW_CONTEXT
,
72 .emit
= upload_blend_constant_color
75 /* Constant single cliprect for framebuffer object or DRI2 drawing */
76 static void upload_drawing_rect(struct brw_context
*brw
)
78 struct intel_context
*intel
= &brw
->intel
;
79 GLcontext
*ctx
= &intel
->ctx
;
82 OUT_BATCH(_3DSTATE_DRAWRECT_INFO_I965
);
83 OUT_BATCH(0); /* xmin, ymin */
84 OUT_BATCH(((ctx
->DrawBuffer
->Width
- 1) & 0xffff) |
85 ((ctx
->DrawBuffer
->Height
- 1) << 16));
90 const struct brw_tracked_state brw_drawing_rect
= {
93 .brw
= BRW_NEW_CONTEXT
,
96 .emit
= upload_drawing_rect
99 static void prepare_binding_table_pointers(struct brw_context
*brw
)
101 brw_add_validated_bo(brw
, brw
->vs
.bind_bo
);
102 brw_add_validated_bo(brw
, brw
->wm
.bind_bo
);
106 * Upload the binding table pointers, which point each stage's array of surface
109 * The binding table pointers are relative to the surface state base address,
112 static void upload_binding_table_pointers(struct brw_context
*brw
)
114 struct intel_context
*intel
= &brw
->intel
;
117 OUT_BATCH(CMD_BINDING_TABLE_PTRS
<< 16 | (6 - 2));
118 if (brw
->vs
.bind_bo
!= NULL
)
119 OUT_RELOC(brw
->vs
.bind_bo
, I915_GEM_DOMAIN_SAMPLER
, 0, 0); /* vs */
122 OUT_BATCH(0); /* gs */
123 OUT_BATCH(0); /* clip */
124 OUT_BATCH(0); /* sf */
125 OUT_RELOC(brw
->wm
.bind_bo
, I915_GEM_DOMAIN_SAMPLER
, 0, 0); /* wm/ps */
129 const struct brw_tracked_state brw_binding_table_pointers
= {
132 .brw
= BRW_NEW_BATCH
,
133 .cache
= CACHE_NEW_SURF_BIND
,
135 .prepare
= prepare_binding_table_pointers
,
136 .emit
= upload_binding_table_pointers
,
140 * Upload the binding table pointers, which point each stage's array of surface
143 * The binding table pointers are relative to the surface state base address,
146 static void upload_gen6_binding_table_pointers(struct brw_context
*brw
)
148 struct intel_context
*intel
= &brw
->intel
;
151 OUT_BATCH(CMD_BINDING_TABLE_PTRS
<< 16 |
152 GEN6_BINDING_TABLE_MODIFY_VS
|
153 GEN6_BINDING_TABLE_MODIFY_GS
|
154 GEN6_BINDING_TABLE_MODIFY_PS
|
156 if (brw
->vs
.bind_bo
!= NULL
)
157 OUT_RELOC(brw
->vs
.bind_bo
, I915_GEM_DOMAIN_SAMPLER
, 0, 0); /* vs */
160 OUT_BATCH(0); /* gs */
161 OUT_RELOC(brw
->wm
.bind_bo
, I915_GEM_DOMAIN_SAMPLER
, 0, 0); /* wm/ps */
165 const struct brw_tracked_state gen6_binding_table_pointers
= {
168 .brw
= BRW_NEW_BATCH
,
169 .cache
= CACHE_NEW_SURF_BIND
,
171 .prepare
= prepare_binding_table_pointers
,
172 .emit
= upload_gen6_binding_table_pointers
,
176 * Upload pointers to the per-stage state.
178 * The state pointers in this packet are all relative to the general state
179 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
181 static void upload_pipelined_state_pointers(struct brw_context
*brw
)
183 struct intel_context
*intel
= &brw
->intel
;
186 OUT_BATCH(CMD_PIPELINED_STATE_POINTERS
<< 16 | (7 - 2));
187 OUT_RELOC(brw
->vs
.state_bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0, 0);
188 if (brw
->gs
.prog_active
)
189 OUT_RELOC(brw
->gs
.state_bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0, 1);
192 OUT_RELOC(brw
->clip
.state_bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0, 1);
193 OUT_RELOC(brw
->sf
.state_bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0, 0);
194 OUT_RELOC(brw
->wm
.state_bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0, 0);
195 OUT_RELOC(brw
->cc
.state_bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0, 0);
198 brw
->state
.dirty
.brw
|= BRW_NEW_PSP
;
202 static void prepare_psp_urb_cbs(struct brw_context
*brw
)
204 brw_add_validated_bo(brw
, brw
->vs
.state_bo
);
205 brw_add_validated_bo(brw
, brw
->gs
.state_bo
);
206 brw_add_validated_bo(brw
, brw
->clip
.state_bo
);
207 brw_add_validated_bo(brw
, brw
->sf
.state_bo
);
208 brw_add_validated_bo(brw
, brw
->wm
.state_bo
);
209 brw_add_validated_bo(brw
, brw
->cc
.state_bo
);
/* Emit the pipelined state pointers, URB fence, and constant buffer
 * URB state together — they must be programmed as a group.
 */
static void upload_psp_urb_cbs(struct brw_context *brw)
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_cs_urb_state(brw);
}
219 const struct brw_tracked_state brw_psp_urb_cbs
= {
222 .brw
= BRW_NEW_URB_FENCE
| BRW_NEW_BATCH
,
223 .cache
= (CACHE_NEW_VS_UNIT
|
226 CACHE_NEW_CLIP_UNIT
|
231 .prepare
= prepare_psp_urb_cbs
,
232 .emit
= upload_psp_urb_cbs
,
235 static void prepare_depthbuffer(struct brw_context
*brw
)
237 struct intel_region
*region
= brw
->state
.depth_region
;
240 brw_add_validated_bo(brw
, region
->buffer
);
243 static void emit_depthbuffer(struct brw_context
*brw
)
245 struct intel_context
*intel
= &brw
->intel
;
246 struct intel_region
*region
= brw
->state
.depth_region
;
251 else if (intel
->is_g4x
|| intel
->gen
== 5)
256 if (region
== NULL
) {
258 OUT_BATCH(CMD_DEPTH_BUFFER
<< 16 | (len
- 2));
259 OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT
<< 18) |
260 (BRW_SURFACE_NULL
<< 29));
265 if (intel
->is_g4x
|| intel
->gen
>= 5)
275 switch (region
->cpp
) {
277 format
= BRW_DEPTHFORMAT_D16_UNORM
;
280 if (intel
->depth_buffer_is_float
)
281 format
= BRW_DEPTHFORMAT_D32_FLOAT
;
283 format
= BRW_DEPTHFORMAT_D24_UNORM_S8_UINT
;
290 assert(region
->tiling
!= I915_TILING_X
);
291 if (IS_GEN6(intel
->intelScreen
->deviceID
))
292 assert(region
->tiling
!= I915_TILING_NONE
);
295 OUT_BATCH(CMD_DEPTH_BUFFER
<< 16 | (len
- 2));
296 OUT_BATCH(((region
->pitch
* region
->cpp
) - 1) |
298 (BRW_TILEWALK_YMAJOR
<< 26) |
299 ((region
->tiling
!= I915_TILING_NONE
) << 27) |
300 (BRW_SURFACE_2D
<< 29));
301 OUT_RELOC(region
->buffer
,
302 I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
,
304 OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW
<< 1) |
305 ((region
->pitch
- 1) << 6) |
306 ((region
->height
- 1) << 19));
309 if (intel
->is_g4x
|| intel
->gen
>= 5)
318 /* Initialize it for safety. */
319 if (intel
->gen
>= 6) {
321 OUT_BATCH(CMD_3D_CLEAR_PARAMS
<< 16 | (2 - 2));
327 const struct brw_tracked_state brw_depthbuffer
= {
330 .brw
= BRW_NEW_DEPTH_BUFFER
| BRW_NEW_BATCH
,
333 .prepare
= prepare_depthbuffer
,
334 .emit
= emit_depthbuffer
,
339 /***********************************************************************
340 * Polygon stipple packet
343 static void upload_polygon_stipple(struct brw_context
*brw
)
345 GLcontext
*ctx
= &brw
->intel
.ctx
;
346 struct brw_polygon_stipple bps
;
349 memset(&bps
, 0, sizeof(bps
));
350 bps
.header
.opcode
= CMD_POLY_STIPPLE_PATTERN
;
351 bps
.header
.length
= sizeof(bps
)/4-2;
353 /* Polygon stipple is provided in OpenGL order, i.e. bottom
354 * row first. If we're rendering to a window (i.e. the
355 * default frame buffer object, 0), then we need to invert
356 * it to match our pixel layout. But if we're rendering
357 * to a FBO (i.e. any named frame buffer object), we *don't*
358 * need to invert - we already match the layout.
360 if (ctx
->DrawBuffer
->Name
== 0) {
361 for (i
= 0; i
< 32; i
++)
362 bps
.stipple
[i
] = ctx
->PolygonStipple
[31 - i
]; /* invert */
365 for (i
= 0; i
< 32; i
++)
366 bps
.stipple
[i
] = ctx
->PolygonStipple
[i
]; /* don't invert */
369 BRW_CACHED_BATCH_STRUCT(brw
, &bps
);
372 const struct brw_tracked_state brw_polygon_stipple
= {
374 .mesa
= _NEW_POLYGONSTIPPLE
,
375 .brw
= BRW_NEW_CONTEXT
,
378 .emit
= upload_polygon_stipple
382 /***********************************************************************
383 * Polygon stipple offset packet
386 static void upload_polygon_stipple_offset(struct brw_context
*brw
)
388 GLcontext
*ctx
= &brw
->intel
.ctx
;
389 struct brw_polygon_stipple_offset bpso
;
391 memset(&bpso
, 0, sizeof(bpso
));
392 bpso
.header
.opcode
= CMD_POLY_STIPPLE_OFFSET
;
393 bpso
.header
.length
= sizeof(bpso
)/4-2;
395 /* If we're drawing to a system window (ctx->DrawBuffer->Name == 0),
396 * we have to invert the Y axis in order to match the OpenGL
397 * pixel coordinate system, and our offset must be matched
398 * to the window position. If we're drawing to a FBO
399 * (ctx->DrawBuffer->Name != 0), then our native pixel coordinate
400 * system works just fine, and there's no window system to
403 if (brw
->intel
.ctx
.DrawBuffer
->Name
== 0) {
404 bpso
.bits0
.x_offset
= 0;
405 bpso
.bits0
.y_offset
= (32 - (ctx
->DrawBuffer
->Height
& 31)) & 31;
408 bpso
.bits0
.y_offset
= 0;
409 bpso
.bits0
.x_offset
= 0;
412 BRW_CACHED_BATCH_STRUCT(brw
, &bpso
);
415 #define _NEW_WINDOW_POS 0x40000000
417 const struct brw_tracked_state brw_polygon_stipple_offset
= {
419 .mesa
= _NEW_WINDOW_POS
,
420 .brw
= BRW_NEW_CONTEXT
,
423 .emit
= upload_polygon_stipple_offset
426 /**********************************************************************
429 static void upload_aa_line_parameters(struct brw_context
*brw
)
431 struct brw_aa_line_parameters balp
;
433 if (!brw
->has_aa_line_parameters
)
436 /* use legacy aa line coverage computation */
437 memset(&balp
, 0, sizeof(balp
));
438 balp
.header
.opcode
= CMD_AA_LINE_PARAMETERS
;
439 balp
.header
.length
= sizeof(balp
) / 4 - 2;
441 BRW_CACHED_BATCH_STRUCT(brw
, &balp
);
444 const struct brw_tracked_state brw_aa_line_parameters
= {
447 .brw
= BRW_NEW_CONTEXT
,
450 .emit
= upload_aa_line_parameters
453 /***********************************************************************
454 * Line stipple packet
457 static void upload_line_stipple(struct brw_context
*brw
)
459 GLcontext
*ctx
= &brw
->intel
.ctx
;
460 struct brw_line_stipple bls
;
464 memset(&bls
, 0, sizeof(bls
));
465 bls
.header
.opcode
= CMD_LINE_STIPPLE_PATTERN
;
466 bls
.header
.length
= sizeof(bls
)/4 - 2;
468 bls
.bits0
.pattern
= ctx
->Line
.StipplePattern
;
469 bls
.bits1
.repeat_count
= ctx
->Line
.StippleFactor
;
471 tmp
= 1.0 / (GLfloat
) ctx
->Line
.StippleFactor
;
472 tmpi
= tmp
* (1<<13);
475 bls
.bits1
.inverse_repeat_count
= tmpi
;
477 BRW_CACHED_BATCH_STRUCT(brw
, &bls
);
480 const struct brw_tracked_state brw_line_stipple
= {
483 .brw
= BRW_NEW_CONTEXT
,
486 .emit
= upload_line_stipple
490 /***********************************************************************
491 * Misc invarient state packets
494 static void upload_invarient_state( struct brw_context
*brw
)
496 struct intel_context
*intel
= &brw
->intel
;
499 /* 0x61040000 Pipeline Select */
500 /* PipelineSelect : 0 */
501 struct brw_pipeline_select ps
;
503 memset(&ps
, 0, sizeof(ps
));
504 ps
.header
.opcode
= brw
->CMD_PIPELINE_SELECT
;
505 ps
.header
.pipeline_select
= 0;
506 BRW_BATCH_STRUCT(brw
, &ps
);
509 if (intel
->gen
< 6) {
510 struct brw_global_depth_offset_clamp gdo
;
511 memset(&gdo
, 0, sizeof(gdo
));
513 /* Disable depth offset clamping.
515 gdo
.header
.opcode
= CMD_GLOBAL_DEPTH_OFFSET_CLAMP
;
516 gdo
.header
.length
= sizeof(gdo
)/4 - 2;
517 gdo
.depth_offset_clamp
= 0.0;
519 BRW_BATCH_STRUCT(brw
, &gdo
);
522 intel_batchbuffer_emit_mi_flush(intel
->batch
);
524 if (intel
->gen
>= 6) {
528 OUT_BATCH(CMD_3D_MULTISAMPLE
<< 16 | (3 - 2));
529 OUT_BATCH(MS_PIXEL_LOCATION_CENTER
|
531 OUT_BATCH(0); /* positions for 4/8-sample */
535 OUT_BATCH(CMD_3D_SAMPLE_MASK
<< 16 | (2 - 2));
539 for (i
= 0; i
< 4; i
++) {
541 OUT_BATCH(CMD_GS_SVB_INDEX
<< 16 | (4 - 2));
542 OUT_BATCH(i
<< SVB_INDEX_SHIFT
);
544 OUT_BATCH(0xffffffff);
549 /* 0x61020000 State Instruction Pointer */
551 struct brw_system_instruction_pointer sip
;
552 memset(&sip
, 0, sizeof(sip
));
554 sip
.header
.opcode
= CMD_STATE_INSN_POINTER
;
555 sip
.header
.length
= 0;
557 sip
.bits0
.system_instruction_pointer
= 0;
558 BRW_BATCH_STRUCT(brw
, &sip
);
563 struct brw_vf_statistics vfs
;
564 memset(&vfs
, 0, sizeof(vfs
));
566 vfs
.opcode
= brw
->CMD_VF_STATISTICS
;
567 if (INTEL_DEBUG
& DEBUG_STATS
)
568 vfs
.statistics_enable
= 1;
570 BRW_BATCH_STRUCT(brw
, &vfs
);
574 const struct brw_tracked_state brw_invarient_state
= {
577 .brw
= BRW_NEW_CONTEXT
,
580 .emit
= upload_invarient_state
584 * Define the base addresses which some state is referenced from.
586 * This allows us to avoid having to emit relocations in many places for
587 * cached state, and instead emit pointers inside of large, mostly-static
588 * state pools. This comes at the expense of memory, and more expensive cache
591 static void upload_state_base_address( struct brw_context
*brw
)
593 struct intel_context
*intel
= &brw
->intel
;
595 /* Output the structure (brw_state_base_address) directly to the
596 * batchbuffer, so we can emit relocations inline.
598 if (intel
->gen
>= 6) {
600 OUT_BATCH(CMD_STATE_BASE_ADDRESS
<< 16 | (10 - 2));
601 OUT_BATCH(1); /* General state base address */
602 OUT_BATCH(1); /* Surface state base address */
603 OUT_BATCH(1); /* Dynamic state base address */
604 OUT_BATCH(1); /* Indirect object base address */
605 OUT_BATCH(1); /* Instruction base address */
606 OUT_BATCH(1); /* General state upper bound */
607 OUT_BATCH(1); /* Dynamic state upper bound */
608 OUT_BATCH(1); /* Indirect object upper bound */
609 OUT_BATCH(1); /* Instruction access upper bound */
611 } else if (intel
->gen
== 5) {
613 OUT_BATCH(CMD_STATE_BASE_ADDRESS
<< 16 | (8 - 2));
614 OUT_BATCH(1); /* General state base address */
615 OUT_BATCH(1); /* Surface state base address */
616 OUT_BATCH(1); /* Indirect object base address */
617 OUT_BATCH(1); /* Instruction base address */
618 OUT_BATCH(1); /* General state upper bound */
619 OUT_BATCH(1); /* Indirect object upper bound */
620 OUT_BATCH(1); /* Instruction access upper bound */
624 OUT_BATCH(CMD_STATE_BASE_ADDRESS
<< 16 | (6 - 2));
625 OUT_BATCH(1); /* General state base address */
626 OUT_BATCH(1); /* Surface state base address */
627 OUT_BATCH(1); /* Indirect object base address */
628 OUT_BATCH(1); /* General state upper bound */
629 OUT_BATCH(1); /* Indirect object upper bound */
634 const struct brw_tracked_state brw_state_base_address
= {
637 .brw
= BRW_NEW_CONTEXT
,
640 .emit
= upload_state_base_address