5bba8c84ec081d81d4cd59549667ff63feca7636
[mesa.git] / src / mesa / drivers / dri / i965 / brw_misc_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33
34 #include "intel_batchbuffer.h"
35 #include "intel_regions.h"
36
37 #include "brw_context.h"
38 #include "brw_state.h"
39 #include "brw_defines.h"
40
41
42
43
44
45 /***********************************************************************
46 * Blend color
47 */
48
49 static void upload_blend_constant_color(struct brw_context *brw)
50 {
51 struct brw_blend_constant_color bcc;
52
53 memset(&bcc, 0, sizeof(bcc));
54 bcc.header.opcode = CMD_BLEND_CONSTANT_COLOR;
55 bcc.header.length = sizeof(bcc)/4-2;
56 bcc.blend_constant_color[0] = brw->attribs.Color->BlendColor[0];
57 bcc.blend_constant_color[1] = brw->attribs.Color->BlendColor[1];
58 bcc.blend_constant_color[2] = brw->attribs.Color->BlendColor[2];
59 bcc.blend_constant_color[3] = brw->attribs.Color->BlendColor[3];
60
61 BRW_CACHED_BATCH_STRUCT(brw, &bcc);
62 }
63
64
/* Re-emit the blend constant color whenever GL color state changes. */
const struct brw_tracked_state brw_blend_constant_color = {
   .dirty = {
      .mesa = _NEW_COLOR,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_blend_constant_color
};
73
/* Constant single cliprect for framebuffer object or DRI2 drawing */
static void upload_drawing_rect(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   GLcontext *ctx = &intel->ctx;

   /* Only emitted when the context uses a single constant cliprect;
    * otherwise the legacy cliprect-looping batch path handles it.
    */
   if (!intel->constant_cliprect)
      return;

   BEGIN_BATCH(4, NO_LOOP_CLIPRECTS);
   OUT_BATCH(_3DSTATE_DRAWRECT_INFO_I965);
   OUT_BATCH(0); /* xmin, ymin */
   /* xmax/ymax are inclusive, hence the -1 on width and height. */
   OUT_BATCH(((ctx->DrawBuffer->Width - 1) & 0xffff) |
             ((ctx->DrawBuffer->Height - 1) << 16));
   OUT_BATCH(0);
   ADVANCE_BATCH();
}
91
/* Re-emit the drawing rectangle when the bound framebuffer changes. */
const struct brw_tracked_state brw_drawing_rect = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_drawing_rect
};
100
/* Add the WM binding table BO to the validation list before
 * upload_binding_table_pointers() emits a relocation against it.
 */
static void prepare_binding_table_pointers(struct brw_context *brw)
{
   brw_add_validated_bo(brw, brw->wm.bind_bo);
}
105
/**
 * Upload the binding table pointers, which point each stage's array of surface
 * state pointers.
 *
 * The binding table pointers are relative to the surface state base address,
 * which is 0.
 */
static void upload_binding_table_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(6, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_BINDING_TABLE_PTRS << 16 | (6 - 2));
   OUT_BATCH(0); /* vs -- no binding table */
   OUT_BATCH(0); /* gs */
   OUT_BATCH(0); /* clip */
   OUT_BATCH(0); /* sf */
   /* wm/ps binding table, read through the sampler domain */
   OUT_RELOC(brw->wm.bind_bo,
             I915_GEM_DOMAIN_SAMPLER, 0,
             0);
   ADVANCE_BATCH();
}
128
/* Re-emit on every new batch (the packet is not preserved across
 * batches) and whenever the surface binding table is rebuilt.
 */
const struct brw_tracked_state brw_binding_table_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH,
      .cache = CACHE_NEW_SURF_BIND,
   },
   .prepare = prepare_binding_table_pointers,
   .emit = upload_binding_table_pointers,
};
138
139
/**
 * Upload pointers to the per-stage state.
 *
 * The state pointers in this packet are all relative to the general state
 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
 */
static void upload_pipelined_state_pointers(struct brw_context *brw )
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(7, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_PIPELINED_STATE_POINTERS << 16 | (7 - 2));
   OUT_BATCH(CMD_PIPELINED_STATE_POINTERS << 16 | (7 - 2));
   OUT_RELOC(brw->vs.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   /* NOTE(review): the delta of 1 on the GS/clip relocations presumably
    * sets the unit's "enable" bit in the pointer dword -- confirm against
    * the PIPELINED_STATE_POINTERS packet layout in the 965 PRM.
    */
   if (brw->gs.prog_active)
      OUT_RELOC(brw->gs.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
   else
      OUT_BATCH(0); /* GS disabled */
   if (!brw->metaops.active)
      OUT_RELOC(brw->clip.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
   else
      OUT_BATCH(0); /* clip disabled during metaops */
   OUT_RELOC(brw->sf.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   OUT_RELOC(brw->wm.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   OUT_RELOC(brw->cc.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   ADVANCE_BATCH();

   /* Anything derived from the pipelined state pointers must be redone. */
   brw->state.dirty.brw |= BRW_NEW_PSP;
}
168
169
170 static void prepare_psp_urb_cbs(struct brw_context *brw)
171 {
172 brw_add_validated_bo(brw, brw->vs.state_bo);
173 brw_add_validated_bo(brw, brw->gs.state_bo);
174 brw_add_validated_bo(brw, brw->clip.state_bo);
175 brw_add_validated_bo(brw, brw->wm.state_bo);
176 brw_add_validated_bo(brw, brw->cc.state_bo);
177 }
178
/* Emit the pipelined state pointers, then the URB fence and constant
 * buffer state packets.  The order of these three calls is preserved
 * deliberately -- they are emitted into the batch in sequence.
 */
static void upload_psp_urb_cbs(struct brw_context *brw )
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_constant_buffer_state(brw);
}
185
/* PSP/URB/CB state depends on every pipeline unit's cached state,
 * the URB fence layout, metaops transitions, and batch boundaries.
 */
const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_URB_FENCE | BRW_NEW_METAOPS | BRW_NEW_BATCH,
      .cache = (CACHE_NEW_VS_UNIT |
                CACHE_NEW_GS_UNIT |
                CACHE_NEW_GS_PROG |
                CACHE_NEW_CLIP_UNIT |
                CACHE_NEW_SF_UNIT |
                CACHE_NEW_WM_UNIT |
                CACHE_NEW_CC_UNIT)
   },
   .prepare = prepare_psp_urb_cbs,
   .emit = upload_psp_urb_cbs,
};
201
202 static void prepare_depthbuffer(struct brw_context *brw)
203 {
204 struct intel_region *region = brw->state.depth_region;
205
206 if (region != NULL)
207 brw_add_validated_bo(brw, region->buffer);
208 }
209
/* Emit the depth-buffer packet describing the current depth region,
 * or a null depth surface when no region is bound.
 */
static void emit_depthbuffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct intel_region *region = brw->state.depth_region;
   /* GM45/G4X use a longer variant of the packet (one extra dword). */
   unsigned int len = (BRW_IS_GM45(brw) || BRW_IS_G4X(brw)) ? sizeof(struct brw_depthbuffer_gm45_g4x) / 4 : sizeof(struct brw_depthbuffer) / 4;

   if (region == NULL) {
      /* No depth region: emit a null surface. */
      BEGIN_BATCH(len, IGNORE_CLIPRECTS);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
                (BRW_SURFACE_NULL << 29));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);

      if (BRW_IS_GM45(brw) || BRW_IS_G4X(brw))
         OUT_BATCH(0);

      ADVANCE_BATCH();
   } else {
      unsigned int format;

      /* Choose the hardware depth format from the region's bytes per
       * pixel; 4-byte regions are either pure float depth or packed
       * depth/stencil depending on context state.
       */
      switch (region->cpp) {
      case 2:
         format = BRW_DEPTHFORMAT_D16_UNORM;
         break;
      case 4:
         if (intel->depth_buffer_is_float)
            format = BRW_DEPTHFORMAT_D32_FLOAT;
         else
            format = BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
         break;
      default:
         assert(0);
         return;
      }

      BEGIN_BATCH(len, IGNORE_CLIPRECTS);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
      /* Pitch in bytes minus one, plus format/tiling/surface-type bits. */
      OUT_BATCH(((region->pitch * region->cpp) - 1) |
                (format << 18) |
                (BRW_TILEWALK_YMAJOR << 26) |
                ((region->tiling != I915_TILING_NONE) << 27) |
                (BRW_SURFACE_2D << 29));
      OUT_RELOC(region->buffer,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                0);
      /* Width (taken from pitch in pixels) and height, both encoded
       * minus one.
       */
      OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1) |
                ((region->pitch - 1) << 6) |
                ((region->height - 1) << 19));
      OUT_BATCH(0);

      if (BRW_IS_GM45(brw) || BRW_IS_G4X(brw))
         OUT_BATCH(0);

      ADVANCE_BATCH();
   }
}
268
/* Re-emit the depth buffer packet whenever the bound depth region or
 * the batch changes.
 */
const struct brw_tracked_state brw_depthbuffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_DEPTH_BUFFER | BRW_NEW_BATCH,
      .cache = 0,
   },
   .prepare = prepare_depthbuffer,
   .emit = emit_depthbuffer,
};
278
279
280
281 /***********************************************************************
282 * Polygon stipple packet
283 */
284
285 static void upload_polygon_stipple(struct brw_context *brw)
286 {
287 struct brw_polygon_stipple bps;
288 GLuint i;
289
290 memset(&bps, 0, sizeof(bps));
291 bps.header.opcode = CMD_POLY_STIPPLE_PATTERN;
292 bps.header.length = sizeof(bps)/4-2;
293
294 for (i = 0; i < 32; i++)
295 bps.stipple[i] = brw->attribs.PolygonStipple[31 - i]; /* invert */
296
297 BRW_CACHED_BATCH_STRUCT(brw, &bps);
298 }
299
/* Re-emit the stipple pattern when GL polygon stipple state changes. */
const struct brw_tracked_state brw_polygon_stipple = {
   .dirty = {
      .mesa = _NEW_POLYGONSTIPPLE,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_polygon_stipple
};
308
309
310 /***********************************************************************
311 * Polygon stipple offset packet
312 */
313
314 static void upload_polygon_stipple_offset(struct brw_context *brw)
315 {
316 __DRIdrawablePrivate *dPriv = brw->intel.driDrawable;
317 struct brw_polygon_stipple_offset bpso;
318
319 memset(&bpso, 0, sizeof(bpso));
320 bpso.header.opcode = CMD_POLY_STIPPLE_OFFSET;
321 bpso.header.length = sizeof(bpso)/4-2;
322
323 bpso.bits0.x_offset = (32 - (dPriv->x & 31)) & 31;
324 bpso.bits0.y_offset = (32 - ((dPriv->y + dPriv->h) & 31)) & 31;
325
326 BRW_CACHED_BATCH_STRUCT(brw, &bpso);
327 }
328
/* Window-position dirty bit; defined locally because core Mesa does
 * not provide a _NEW_* flag for window moves.
 */
#define _NEW_WINDOW_POS 0x40000000

const struct brw_tracked_state brw_polygon_stipple_offset = {
   .dirty = {
      .mesa = _NEW_WINDOW_POS,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_polygon_stipple_offset
};
339
340 /**********************************************************************
341 * AA Line parameters
342 */
343 static void upload_aa_line_parameters(struct brw_context *brw)
344 {
345 struct brw_aa_line_parameters balp;
346
347 if (!(BRW_IS_GM45(brw) || BRW_IS_G4X(brw)))
348 return;
349
350 /* use legacy aa line coverage computation */
351 memset(&balp, 0, sizeof(balp));
352 balp.header.opcode = CMD_AA_LINE_PARAMETERS;
353 balp.header.length = sizeof(balp) / 4 - 2;
354
355 BRW_CACHED_BATCH_STRUCT(brw, &balp);
356 }
357
/* The packet is constant, so it only needs emitting once per context. */
const struct brw_tracked_state brw_aa_line_parameters = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_aa_line_parameters
};
366
367 /***********************************************************************
368 * Line stipple packet
369 */
370
371 static void upload_line_stipple(struct brw_context *brw)
372 {
373 struct brw_line_stipple bls;
374 GLfloat tmp;
375 GLint tmpi;
376
377 memset(&bls, 0, sizeof(bls));
378 bls.header.opcode = CMD_LINE_STIPPLE_PATTERN;
379 bls.header.length = sizeof(bls)/4 - 2;
380
381 bls.bits0.pattern = brw->attribs.Line->StipplePattern;
382 bls.bits1.repeat_count = brw->attribs.Line->StippleFactor;
383
384 tmp = 1.0 / (GLfloat) brw->attribs.Line->StippleFactor;
385 tmpi = tmp * (1<<13);
386
387
388 bls.bits1.inverse_repeat_count = tmpi;
389
390 BRW_CACHED_BATCH_STRUCT(brw, &bls);
391 }
392
/* Re-emit the stipple packet when GL line state changes. */
const struct brw_tracked_state brw_line_stipple = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_line_stipple
};
401
402
403 /***********************************************************************
404 * Misc invarient state packets
405 */
406
/* Emit the packets that never change for the life of the context:
 * pipeline select, global depth offset clamp, system instruction
 * pointer, and VF statistics enable.
 */
static void upload_invarient_state( struct brw_context *brw )
{
   {
      /* 0x61040000 Pipeline Select */
      /* PipelineSelect : 0 */
      struct brw_pipeline_select ps;

      memset(&ps, 0, sizeof(ps));
      ps.header.opcode = CMD_PIPELINE_SELECT(brw);
      ps.header.pipeline_select = 0;
      BRW_BATCH_STRUCT(brw, &ps);
   }

   {
      struct brw_global_depth_offset_clamp gdo;
      memset(&gdo, 0, sizeof(gdo));

      /* Disable depth offset clamping.
       */
      gdo.header.opcode = CMD_GLOBAL_DEPTH_OFFSET_CLAMP;
      gdo.header.length = sizeof(gdo)/4 - 2;
      gdo.depth_offset_clamp = 0.0;

      BRW_BATCH_STRUCT(brw, &gdo);
   }


   /* 0x61020000 State Instruction Pointer */
   {
      struct brw_system_instruction_pointer sip;
      memset(&sip, 0, sizeof(sip));

      /* Point the system routine at offset 0. */
      sip.header.opcode = CMD_STATE_INSN_POINTER;
      sip.header.length = 0;
      sip.bits0.pad = 0;
      sip.bits0.system_instruction_pointer = 0;
      BRW_BATCH_STRUCT(brw, &sip);
   }


   {
      struct brw_vf_statistics vfs;
      memset(&vfs, 0, sizeof(vfs));

      /* Only collect vertex-fetch statistics when stats debugging is on. */
      vfs.opcode = CMD_VF_STATISTICS(brw);
      if (INTEL_DEBUG & DEBUG_STATS)
         vfs.statistics_enable = 1;

      BRW_BATCH_STRUCT(brw, &vfs);
   }
}
458
/* The invariant packets only need emitting once per context. */
const struct brw_tracked_state brw_invarient_state = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_invarient_state
};
467
/**
 * Define the base addresses which some state is referenced from.
 *
 * This allows us to avoid having to emit relocations in many places for
 * cached state, and instead emit pointers inside of large, mostly-static
 * state pools. This comes at the expense of memory, and more expensive cache
 * misses.
 */
static void upload_state_base_address( struct brw_context *brw )
{
   struct intel_context *intel = &brw->intel;

   /* Output the structure (brw_state_base_address) directly to the
    * batchbuffer, so we can emit relocations inline.
    */
   BEGIN_BATCH(6, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
   /* NOTE(review): the literal 1 in each dword presumably encodes a base
    * address of 0 with the low "modify enable" bit set -- confirm against
    * the STATE_BASE_ADDRESS packet definition in the 965 PRM.
    */
   OUT_BATCH(1); /* General state base address */
   OUT_BATCH(1); /* Surface state base address */
   OUT_BATCH(1); /* Indirect object base address */
   OUT_BATCH(1); /* General state upper bound */
   OUT_BATCH(1); /* Indirect object upper bound */
   ADVANCE_BATCH();
}
492
/* The base addresses are constant, so emit them once per context. */
const struct brw_tracked_state brw_state_base_address = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0,
   },
   .emit = upload_state_base_address
};