2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **********************************************************************/
29 * Keith Whitwell <keith@tungstengraphics.com>
34 #include "intel_batchbuffer.h"
35 #include "intel_regions.h"
37 #include "brw_context.h"
38 #include "brw_state.h"
39 #include "brw_defines.h"
45 /***********************************************************************
49 static void upload_blend_constant_color(struct brw_context
*brw
)
51 GLcontext
*ctx
= &brw
->intel
.ctx
;
52 struct brw_blend_constant_color bcc
;
54 memset(&bcc
, 0, sizeof(bcc
));
55 bcc
.header
.opcode
= CMD_BLEND_CONSTANT_COLOR
;
56 bcc
.header
.length
= sizeof(bcc
)/4-2;
57 bcc
.blend_constant_color
[0] = ctx
->Color
.BlendColor
[0];
58 bcc
.blend_constant_color
[1] = ctx
->Color
.BlendColor
[1];
59 bcc
.blend_constant_color
[2] = ctx
->Color
.BlendColor
[2];
60 bcc
.blend_constant_color
[3] = ctx
->Color
.BlendColor
[3];
62 BRW_CACHED_BATCH_STRUCT(brw
, &bcc
);
66 const struct brw_tracked_state brw_blend_constant_color
= {
72 .emit
= upload_blend_constant_color
75 /* Constant single cliprect for framebuffer object or DRI2 drawing */
76 static void upload_drawing_rect(struct brw_context
*brw
)
78 struct intel_context
*intel
= &brw
->intel
;
79 GLcontext
*ctx
= &intel
->ctx
;
81 if (!intel
->constant_cliprect
)
84 BEGIN_BATCH(4, NO_LOOP_CLIPRECTS
);
85 OUT_BATCH(_3DSTATE_DRAWRECT_INFO_I965
);
86 OUT_BATCH(0); /* xmin, ymin */
87 OUT_BATCH(((ctx
->DrawBuffer
->Width
- 1) & 0xffff) |
88 ((ctx
->DrawBuffer
->Height
- 1) << 16));
93 const struct brw_tracked_state brw_drawing_rect
= {
99 .emit
= upload_drawing_rect
102 static void prepare_binding_table_pointers(struct brw_context
*brw
)
104 brw_add_validated_bo(brw
, brw
->vs
.bind_bo
);
105 brw_add_validated_bo(brw
, brw
->wm
.bind_bo
);
109 * Upload the binding table pointers, which point each stage's array of surface
112 * The binding table pointers are relative to the surface state base address,
115 static void upload_binding_table_pointers(struct brw_context
*brw
)
117 struct intel_context
*intel
= &brw
->intel
;
119 BEGIN_BATCH(6, IGNORE_CLIPRECTS
);
120 OUT_BATCH(CMD_BINDING_TABLE_PTRS
<< 16 | (6 - 2));
121 OUT_RELOC(brw
->vs
.bind_bo
, I915_GEM_DOMAIN_SAMPLER
, 0, 0); /* vs */
122 OUT_BATCH(0); /* gs */
123 OUT_BATCH(0); /* clip */
124 OUT_BATCH(0); /* sf */
125 OUT_RELOC(brw
->wm
.bind_bo
, I915_GEM_DOMAIN_SAMPLER
, 0, 0); /* wm/ps */
129 const struct brw_tracked_state brw_binding_table_pointers
= {
132 .brw
= BRW_NEW_BATCH
,
133 .cache
= CACHE_NEW_SURF_BIND
,
135 .prepare
= prepare_binding_table_pointers
,
136 .emit
= upload_binding_table_pointers
,
141 * Upload pointers to the per-stage state.
143 * The state pointers in this packet are all relative to the general state
144 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
146 static void upload_pipelined_state_pointers(struct brw_context
*brw
)
148 struct intel_context
*intel
= &brw
->intel
;
150 BEGIN_BATCH(7, IGNORE_CLIPRECTS
);
151 OUT_BATCH(CMD_PIPELINED_STATE_POINTERS
<< 16 | (7 - 2));
152 OUT_RELOC(brw
->vs
.state_bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0, 0);
153 if (brw
->gs
.prog_active
)
154 OUT_RELOC(brw
->gs
.state_bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0, 1);
157 OUT_RELOC(brw
->clip
.state_bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0, 1);
158 OUT_RELOC(brw
->sf
.state_bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0, 0);
159 OUT_RELOC(brw
->wm
.state_bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0, 0);
160 OUT_RELOC(brw
->cc
.state_bo
, I915_GEM_DOMAIN_INSTRUCTION
, 0, 0);
163 brw
->state
.dirty
.brw
|= BRW_NEW_PSP
;
167 static void prepare_psp_urb_cbs(struct brw_context
*brw
)
169 brw_add_validated_bo(brw
, brw
->vs
.state_bo
);
170 brw_add_validated_bo(brw
, brw
->gs
.state_bo
);
171 brw_add_validated_bo(brw
, brw
->clip
.state_bo
);
172 brw_add_validated_bo(brw
, brw
->wm
.state_bo
);
173 brw_add_validated_bo(brw
, brw
->cc
.state_bo
);
/**
 * Emit the pipelined state pointers plus the URB fence and CS URB state,
 * which must be re-emitted together.
 */
static void upload_psp_urb_cbs(struct brw_context *brw)
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_cs_urb_state(brw);
}
183 const struct brw_tracked_state brw_psp_urb_cbs
= {
186 .brw
= BRW_NEW_URB_FENCE
| BRW_NEW_BATCH
,
187 .cache
= (CACHE_NEW_VS_UNIT
|
190 CACHE_NEW_CLIP_UNIT
|
195 .prepare
= prepare_psp_urb_cbs
,
196 .emit
= upload_psp_urb_cbs
,
199 static void prepare_depthbuffer(struct brw_context
*brw
)
201 struct intel_region
*region
= brw
->state
.depth_region
;
204 brw_add_validated_bo(brw
, region
->buffer
);
207 static void emit_depthbuffer(struct brw_context
*brw
)
209 struct intel_context
*intel
= &brw
->intel
;
210 struct intel_region
*region
= brw
->state
.depth_region
;
211 unsigned int len
= BRW_IS_G4X(brw
) ? 6 : 5;
213 if (region
== NULL
) {
214 BEGIN_BATCH(len
, IGNORE_CLIPRECTS
);
215 OUT_BATCH(CMD_DEPTH_BUFFER
<< 16 | (len
- 2));
216 OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT
<< 18) |
217 (BRW_SURFACE_NULL
<< 29));
229 switch (region
->cpp
) {
231 format
= BRW_DEPTHFORMAT_D16_UNORM
;
234 if (intel
->depth_buffer_is_float
)
235 format
= BRW_DEPTHFORMAT_D32_FLOAT
;
237 format
= BRW_DEPTHFORMAT_D24_UNORM_S8_UINT
;
244 BEGIN_BATCH(len
, IGNORE_CLIPRECTS
);
245 OUT_BATCH(CMD_DEPTH_BUFFER
<< 16 | (len
- 2));
246 OUT_BATCH(((region
->pitch
* region
->cpp
) - 1) |
248 (BRW_TILEWALK_YMAJOR
<< 26) |
249 ((region
->tiling
!= I915_TILING_NONE
) << 27) |
250 (BRW_SURFACE_2D
<< 29));
251 OUT_RELOC(region
->buffer
,
252 I915_GEM_DOMAIN_RENDER
, I915_GEM_DOMAIN_RENDER
,
254 OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW
<< 1) |
255 ((region
->pitch
- 1) << 6) |
256 ((region
->height
- 1) << 19));
266 const struct brw_tracked_state brw_depthbuffer
= {
269 .brw
= BRW_NEW_DEPTH_BUFFER
| BRW_NEW_BATCH
,
272 .prepare
= prepare_depthbuffer
,
273 .emit
= emit_depthbuffer
,
278 /***********************************************************************
279 * Polygon stipple packet
282 static void upload_polygon_stipple(struct brw_context
*brw
)
284 GLcontext
*ctx
= &brw
->intel
.ctx
;
285 struct brw_polygon_stipple bps
;
288 memset(&bps
, 0, sizeof(bps
));
289 bps
.header
.opcode
= CMD_POLY_STIPPLE_PATTERN
;
290 bps
.header
.length
= sizeof(bps
)/4-2;
292 /* Polygon stipple is provided in OpenGL order, i.e. bottom
293 * row first. If we're rendering to a window (i.e. the
294 * default frame buffer object, 0), then we need to invert
295 * it to match our pixel layout. But if we're rendering
296 * to a FBO (i.e. any named frame buffer object), we *don't*
297 * need to invert - we already match the layout.
299 if (ctx
->DrawBuffer
->Name
== 0) {
300 for (i
= 0; i
< 32; i
++)
301 bps
.stipple
[i
] = ctx
->PolygonStipple
[31 - i
]; /* invert */
304 for (i
= 0; i
< 32; i
++)
305 bps
.stipple
[i
] = ctx
->PolygonStipple
[i
]; /* don't invert */
308 BRW_CACHED_BATCH_STRUCT(brw
, &bps
);
311 const struct brw_tracked_state brw_polygon_stipple
= {
313 .mesa
= _NEW_POLYGONSTIPPLE
,
317 .emit
= upload_polygon_stipple
321 /***********************************************************************
322 * Polygon stipple offset packet
325 static void upload_polygon_stipple_offset(struct brw_context
*brw
)
327 __DRIdrawablePrivate
*dPriv
= brw
->intel
.driDrawable
;
328 struct brw_polygon_stipple_offset bpso
;
330 memset(&bpso
, 0, sizeof(bpso
));
331 bpso
.header
.opcode
= CMD_POLY_STIPPLE_OFFSET
;
332 bpso
.header
.length
= sizeof(bpso
)/4-2;
334 /* If we're drawing to a system window (ctx->DrawBuffer->Name == 0),
335 * we have to invert the Y axis in order to match the OpenGL
336 * pixel coordinate system, and our offset must be matched
337 * to the window position. If we're drawing to a FBO
338 * (ctx->DrawBuffer->Name != 0), then our native pixel coordinate
339 * system works just fine, and there's no window system to
342 if (brw
->intel
.ctx
.DrawBuffer
->Name
== 0) {
343 bpso
.bits0
.x_offset
= (32 - (dPriv
->x
& 31)) & 31;
344 bpso
.bits0
.y_offset
= (32 - ((dPriv
->y
+ dPriv
->h
) & 31)) & 31;
347 bpso
.bits0
.y_offset
= 0;
348 bpso
.bits0
.x_offset
= 0;
351 BRW_CACHED_BATCH_STRUCT(brw
, &bpso
);
354 #define _NEW_WINDOW_POS 0x40000000
356 const struct brw_tracked_state brw_polygon_stipple_offset
= {
358 .mesa
= _NEW_WINDOW_POS
,
362 .emit
= upload_polygon_stipple_offset
365 /**********************************************************************
368 static void upload_aa_line_parameters(struct brw_context
*brw
)
370 struct brw_aa_line_parameters balp
;
372 if (!BRW_IS_G4X(brw
))
375 /* use legacy aa line coverage computation */
376 memset(&balp
, 0, sizeof(balp
));
377 balp
.header
.opcode
= CMD_AA_LINE_PARAMETERS
;
378 balp
.header
.length
= sizeof(balp
) / 4 - 2;
380 BRW_CACHED_BATCH_STRUCT(brw
, &balp
);
383 const struct brw_tracked_state brw_aa_line_parameters
= {
386 .brw
= BRW_NEW_CONTEXT
,
389 .emit
= upload_aa_line_parameters
392 /***********************************************************************
393 * Line stipple packet
396 static void upload_line_stipple(struct brw_context
*brw
)
398 GLcontext
*ctx
= &brw
->intel
.ctx
;
399 struct brw_line_stipple bls
;
403 memset(&bls
, 0, sizeof(bls
));
404 bls
.header
.opcode
= CMD_LINE_STIPPLE_PATTERN
;
405 bls
.header
.length
= sizeof(bls
)/4 - 2;
407 bls
.bits0
.pattern
= ctx
->Line
.StipplePattern
;
408 bls
.bits1
.repeat_count
= ctx
->Line
.StippleFactor
;
410 tmp
= 1.0 / (GLfloat
) ctx
->Line
.StippleFactor
;
411 tmpi
= tmp
* (1<<13);
414 bls
.bits1
.inverse_repeat_count
= tmpi
;
416 BRW_CACHED_BATCH_STRUCT(brw
, &bls
);
419 const struct brw_tracked_state brw_line_stipple
= {
425 .emit
= upload_line_stipple
429 /***********************************************************************
430 * Misc invarient state packets
433 static void upload_invarient_state( struct brw_context
*brw
)
436 /* 0x61040000 Pipeline Select */
437 /* PipelineSelect : 0 */
438 struct brw_pipeline_select ps
;
440 memset(&ps
, 0, sizeof(ps
));
441 ps
.header
.opcode
= CMD_PIPELINE_SELECT(brw
);
442 ps
.header
.pipeline_select
= 0;
443 BRW_BATCH_STRUCT(brw
, &ps
);
447 struct brw_global_depth_offset_clamp gdo
;
448 memset(&gdo
, 0, sizeof(gdo
));
450 /* Disable depth offset clamping.
452 gdo
.header
.opcode
= CMD_GLOBAL_DEPTH_OFFSET_CLAMP
;
453 gdo
.header
.length
= sizeof(gdo
)/4 - 2;
454 gdo
.depth_offset_clamp
= 0.0;
456 BRW_BATCH_STRUCT(brw
, &gdo
);
460 /* 0x61020000 State Instruction Pointer */
462 struct brw_system_instruction_pointer sip
;
463 memset(&sip
, 0, sizeof(sip
));
465 sip
.header
.opcode
= CMD_STATE_INSN_POINTER
;
466 sip
.header
.length
= 0;
468 sip
.bits0
.system_instruction_pointer
= 0;
469 BRW_BATCH_STRUCT(brw
, &sip
);
474 struct brw_vf_statistics vfs
;
475 memset(&vfs
, 0, sizeof(vfs
));
477 vfs
.opcode
= CMD_VF_STATISTICS(brw
);
478 if (INTEL_DEBUG
& DEBUG_STATS
)
479 vfs
.statistics_enable
= 1;
481 BRW_BATCH_STRUCT(brw
, &vfs
);
485 const struct brw_tracked_state brw_invarient_state
= {
488 .brw
= BRW_NEW_CONTEXT
,
491 .emit
= upload_invarient_state
495 * Define the base addresses which some state is referenced from.
497 * This allows us to avoid having to emit relocations in many places for
498 * cached state, and instead emit pointers inside of large, mostly-static
499 * state pools. This comes at the expense of memory, and more expensive cache
502 static void upload_state_base_address( struct brw_context
*brw
)
504 struct intel_context
*intel
= &brw
->intel
;
506 /* Output the structure (brw_state_base_address) directly to the
507 * batchbuffer, so we can emit relocations inline.
509 BEGIN_BATCH(6, IGNORE_CLIPRECTS
);
510 OUT_BATCH(CMD_STATE_BASE_ADDRESS
<< 16 | (6 - 2));
511 OUT_BATCH(1); /* General state base address */
512 OUT_BATCH(1); /* Surface state base address */
513 OUT_BATCH(1); /* Indirect object base address */
514 OUT_BATCH(1); /* General state upper bound */
515 OUT_BATCH(1); /* Indirect object upper bound */
519 const struct brw_tracked_state brw_state_base_address
= {
522 .brw
= BRW_NEW_CONTEXT
,
525 .emit
= upload_state_base_address