src/mesa/drivers/dri/i965/brw_misc_state.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */


#include "intel_batchbuffer.h"
#include "intel_regions.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"


/***********************************************************************
 * Blend color
 */

static void upload_blend_constant_color(struct brw_context *brw)
{
   struct brw_blend_constant_color bcc;

   memset(&bcc, 0, sizeof(bcc));
   bcc.header.opcode = CMD_BLEND_CONSTANT_COLOR;
   bcc.header.length = sizeof(bcc)/4-2;
   bcc.blend_constant_color[0] = brw->attribs.Color->BlendColor[0];
   bcc.blend_constant_color[1] = brw->attribs.Color->BlendColor[1];
   bcc.blend_constant_color[2] = brw->attribs.Color->BlendColor[2];
   bcc.blend_constant_color[3] = brw->attribs.Color->BlendColor[3];

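   /* BRW_CACHED_BATCH_STRUCT compares the packet against the copy emitted
    * last time and skips the emit when nothing has changed.
    */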
   BRW_CACHED_BATCH_STRUCT(brw, &bcc);
}


const struct brw_tracked_state brw_blend_constant_color = {
   .dirty = {
      .mesa = _NEW_COLOR,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_blend_constant_color
};

/* Constant single cliprect for framebuffer object or DRI2 drawing */
static void upload_drawing_rect(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   GLcontext *ctx = &intel->ctx;

   if (!intel->constant_cliprect)
      return;

   BEGIN_BATCH(4, NO_LOOP_CLIPRECTS);
   OUT_BATCH(_3DSTATE_DRAWRECT_INFO_I965);
   OUT_BATCH(0); /* xmin, ymin */
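   /* The max coordinates below are inclusive, so the rectangle covers the
    * full width x height of the drawbuffer.
    */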
   OUT_BATCH(((ctx->DrawBuffer->Width - 1) & 0xffff) |
             ((ctx->DrawBuffer->Height - 1) << 16));
   OUT_BATCH(0);
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_drawing_rect = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_drawing_rect
};

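/* The prepare() hooks below add the buffer objects that the corresponding
 * emit() will relocate against to the validation list, so the batchbuffer
 * code can check aperture space before any packets are emitted.
 */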
static void prepare_binding_table_pointers(struct brw_context *brw)
{
   brw_add_validated_bo(brw, brw->wm.bind_bo);
}

/**
 * Upload the binding table pointers, which point to each stage's array of
 * surface state pointers.
 *
 * The binding table pointers are relative to the surface state base address,
 * which is 0.
 */
static void upload_binding_table_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(6, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_BINDING_TABLE_PTRS << 16 | (6 - 2));
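   /* Only the WM binding table is set up here; the other stages' pointers
    * are left at zero.
    */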
   OUT_BATCH(0); /* vs */
   OUT_BATCH(0); /* gs */
   OUT_BATCH(0); /* clip */
   OUT_BATCH(0); /* sf */
   OUT_RELOC(brw->wm.bind_bo,
             I915_GEM_DOMAIN_SAMPLER, 0,
             0);
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_binding_table_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH,
      .cache = CACHE_NEW_SURF_BIND,
   },
   .prepare = prepare_binding_table_pointers,
   .emit = upload_binding_table_pointers,
};


/**
 * Upload pointers to the per-stage state.
 *
 * The state pointers in this packet are all relative to the general state
 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
 */
static void upload_pipelined_state_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(7, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_PIPELINED_STATE_POINTERS << 16 | (7 - 2));
   OUT_RELOC(brw->vs.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
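   /* The low bit of the GS and CLIP pointers is that unit's enable bit:
    * the GS unit is only enabled when a GS program is active, while the
    * clip unit is always enabled.
    */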
   if (brw->gs.prog_active)
      OUT_RELOC(brw->gs.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
   else
      OUT_BATCH(0);
   OUT_RELOC(brw->clip.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
   OUT_RELOC(brw->sf.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   OUT_RELOC(brw->wm.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   OUT_RELOC(brw->cc.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   ADVANCE_BATCH();

   brw->state.dirty.brw |= BRW_NEW_PSP;
}


static void prepare_psp_urb_cbs(struct brw_context *brw)
{
   brw_add_validated_bo(brw, brw->vs.state_bo);
   brw_add_validated_bo(brw, brw->gs.state_bo);
   brw_add_validated_bo(brw, brw->clip.state_bo);
   brw_add_validated_bo(brw, brw->sf.state_bo);
   brw_add_validated_bo(brw, brw->wm.state_bo);
   brw_add_validated_bo(brw, brw->cc.state_bo);
}

static void upload_psp_urb_cbs(struct brw_context *brw)
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_constant_buffer_state(brw);
}

const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_URB_FENCE | BRW_NEW_BATCH,
      .cache = (CACHE_NEW_VS_UNIT |
                CACHE_NEW_GS_UNIT |
                CACHE_NEW_GS_PROG |
                CACHE_NEW_CLIP_UNIT |
                CACHE_NEW_SF_UNIT |
                CACHE_NEW_WM_UNIT |
                CACHE_NEW_CC_UNIT)
   },
   .prepare = prepare_psp_urb_cbs,
   .emit = upload_psp_urb_cbs,
};

static void prepare_depthbuffer(struct brw_context *brw)
{
   struct intel_region *region = brw->state.depth_region;

   if (region != NULL)
      brw_add_validated_bo(brw, region->buffer);
}

static void emit_depthbuffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct intel_region *region = brw->state.depth_region;
   unsigned int len = BRW_IS_G4X(brw) ? 6 : 5;

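   /* When no depth region is bound, emit a "null" depthbuffer packet
    * (BRW_SURFACE_NULL) instead of skipping the state entirely.
    */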
   if (region == NULL) {
      BEGIN_BATCH(len, IGNORE_CLIPRECTS);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
                (BRW_SURFACE_NULL << 29));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);

      if (BRW_IS_G4X(brw))
         OUT_BATCH(0);

      ADVANCE_BATCH();
   } else {
      unsigned int format;

      switch (region->cpp) {
      case 2:
         format = BRW_DEPTHFORMAT_D16_UNORM;
         break;
      case 4:
         if (intel->depth_buffer_is_float)
            format = BRW_DEPTHFORMAT_D32_FLOAT;
         else
            format = BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
         break;
      default:
         assert(0);
         return;
      }

      BEGIN_BATCH(len, IGNORE_CLIPRECTS);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH(((region->pitch * region->cpp) - 1) |
                (format << 18) |
                (BRW_TILEWALK_YMAJOR << 26) |
                ((region->tiling != I915_TILING_NONE) << 27) |
                (BRW_SURFACE_2D << 29));
      OUT_RELOC(region->buffer,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                0);
      OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1) |
                ((region->pitch - 1) << 6) |
                ((region->height - 1) << 19));
      OUT_BATCH(0);

      if (BRW_IS_G4X(brw))
         OUT_BATCH(0);

      ADVANCE_BATCH();
   }
}

const struct brw_tracked_state brw_depthbuffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_DEPTH_BUFFER | BRW_NEW_BATCH,
      .cache = 0,
   },
   .prepare = prepare_depthbuffer,
   .emit = emit_depthbuffer,
};


/***********************************************************************
 * Polygon stipple packet
 */

static void upload_polygon_stipple(struct brw_context *brw)
{
   struct brw_polygon_stipple bps;
   GLuint i;

   memset(&bps, 0, sizeof(bps));
   bps.header.opcode = CMD_POLY_STIPPLE_PATTERN;
   bps.header.length = sizeof(bps)/4-2;

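   /* GL's polygon stipple pattern is specified bottom-to-top, while the
    * hardware walks it top-to-bottom in screen coordinates, so reverse the
    * row order here.
    */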
   for (i = 0; i < 32; i++)
      bps.stipple[i] = brw->attribs.PolygonStipple[31 - i]; /* invert */

   BRW_CACHED_BATCH_STRUCT(brw, &bps);
}

const struct brw_tracked_state brw_polygon_stipple = {
   .dirty = {
      .mesa = _NEW_POLYGONSTIPPLE,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_polygon_stipple
};


/***********************************************************************
 * Polygon stipple offset packet
 */

static void upload_polygon_stipple_offset(struct brw_context *brw)
{
   __DRIdrawablePrivate *dPriv = brw->intel.driDrawable;
   struct brw_polygon_stipple_offset bpso;

   memset(&bpso, 0, sizeof(bpso));
   bpso.header.opcode = CMD_POLY_STIPPLE_OFFSET;
   bpso.header.length = sizeof(bpso)/4-2;

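   /* Offset the stipple pattern so that it stays anchored to the window
    * rather than the screen.  The Y offset uses the drawable's bottom edge
    * (y + height) because GL's window origin is the lower-left corner.
    */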
   bpso.bits0.x_offset = (32 - (dPriv->x & 31)) & 31;
   bpso.bits0.y_offset = (32 - ((dPriv->y + dPriv->h) & 31)) & 31;

   BRW_CACHED_BATCH_STRUCT(brw, &bpso);
}

#define _NEW_WINDOW_POS 0x40000000

const struct brw_tracked_state brw_polygon_stipple_offset = {
   .dirty = {
      .mesa = _NEW_WINDOW_POS,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_polygon_stipple_offset
};

/**********************************************************************
 * AA Line parameters
 */
static void upload_aa_line_parameters(struct brw_context *brw)
{
   struct brw_aa_line_parameters balp;

   if (!BRW_IS_G4X(brw))
      return;

   /* use legacy aa line coverage computation */
   memset(&balp, 0, sizeof(balp));
   balp.header.opcode = CMD_AA_LINE_PARAMETERS;
   balp.header.length = sizeof(balp) / 4 - 2;

   BRW_CACHED_BATCH_STRUCT(brw, &balp);
}

const struct brw_tracked_state brw_aa_line_parameters = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_aa_line_parameters
};

/***********************************************************************
 * Line stipple packet
 */

static void upload_line_stipple(struct brw_context *brw)
{
   struct brw_line_stipple bls;
   GLfloat tmp;
   GLint tmpi;

   memset(&bls, 0, sizeof(bls));
   bls.header.opcode = CMD_LINE_STIPPLE_PATTERN;
   bls.header.length = sizeof(bls)/4 - 2;

   bls.bits0.pattern = brw->attribs.Line->StipplePattern;
   bls.bits1.repeat_count = brw->attribs.Line->StippleFactor;

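   /* The hardware wants the inverse of the repeat count as a fixed-point
    * value with 13 fractional bits.
    */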
   tmp = 1.0 / (GLfloat) brw->attribs.Line->StippleFactor;
   tmpi = tmp * (1<<13);

   bls.bits1.inverse_repeat_count = tmpi;

   BRW_CACHED_BATCH_STRUCT(brw, &bls);
}

const struct brw_tracked_state brw_line_stipple = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_line_stipple
};


/***********************************************************************
 * Misc invariant state packets
 */

static void upload_invarient_state(struct brw_context *brw)
{
   {
      /* 0x61040000  Pipeline Select */
      /* PipelineSelect : 0 */
      struct brw_pipeline_select ps;

      memset(&ps, 0, sizeof(ps));
      ps.header.opcode = CMD_PIPELINE_SELECT(brw);
      ps.header.pipeline_select = 0;
      BRW_BATCH_STRUCT(brw, &ps);
   }

   {
      struct brw_global_depth_offset_clamp gdo;
      memset(&gdo, 0, sizeof(gdo));

      /* Disable depth offset clamping.
       */
      gdo.header.opcode = CMD_GLOBAL_DEPTH_OFFSET_CLAMP;
      gdo.header.length = sizeof(gdo)/4 - 2;
      gdo.depth_offset_clamp = 0.0;

      BRW_BATCH_STRUCT(brw, &gdo);
   }


   /* 0x61020000  State Instruction Pointer */
   {
      struct brw_system_instruction_pointer sip;
      memset(&sip, 0, sizeof(sip));

      sip.header.opcode = CMD_STATE_INSN_POINTER;
      sip.header.length = 0;
      sip.bits0.pad = 0;
      sip.bits0.system_instruction_pointer = 0;
      BRW_BATCH_STRUCT(brw, &sip);
   }


   {
      struct brw_vf_statistics vfs;
      memset(&vfs, 0, sizeof(vfs));

      vfs.opcode = CMD_VF_STATISTICS(brw);
      if (INTEL_DEBUG & DEBUG_STATS)
         vfs.statistics_enable = 1;

      BRW_BATCH_STRUCT(brw, &vfs);
   }
}

const struct brw_tracked_state brw_invarient_state = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_invarient_state
};

/**
 * Define the base addresses which some state is referenced from.
 *
 * This allows us to avoid having to emit relocations in many places for
 * cached state, and instead emit pointers inside of large, mostly-static
 * state pools.  This comes at the expense of memory, and more expensive
 * cache misses.
 */
static void upload_state_base_address(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   /* Output the structure (brw_state_base_address) directly to the
    * batchbuffer, so we can emit relocations inline.
    */
   BEGIN_BATCH(6, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
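   /* Bit 0 of each of the following dwords is the "modify enable" bit;
    * with no address bits set, each base address (and upper bound) is
    * programmed to zero.
    */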
   OUT_BATCH(1); /* General state base address */
   OUT_BATCH(1); /* Surface state base address */
   OUT_BATCH(1); /* Indirect object base address */
   OUT_BATCH(1); /* General state upper bound */
   OUT_BATCH(1); /* Indirect object upper bound */
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_state_base_address = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0,
   },
   .emit = upload_state_base_address
};