i965: fixup depth buffer check
[mesa.git] / src / mesa / drivers / dri / i965 / brw_misc_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33
34 #include "intel_batchbuffer.h"
35 #include "intel_regions.h"
36
37 #include "brw_context.h"
38 #include "brw_state.h"
39 #include "brw_defines.h"
40
41
42
43
44
45 /***********************************************************************
46 * Blend color
47 */
48
49 static void upload_blend_constant_color(struct brw_context *brw)
50 {
51 struct brw_blend_constant_color bcc;
52
53 memset(&bcc, 0, sizeof(bcc));
54 bcc.header.opcode = CMD_BLEND_CONSTANT_COLOR;
55 bcc.header.length = sizeof(bcc)/4-2;
56 bcc.blend_constant_color[0] = brw->attribs.Color->BlendColor[0];
57 bcc.blend_constant_color[1] = brw->attribs.Color->BlendColor[1];
58 bcc.blend_constant_color[2] = brw->attribs.Color->BlendColor[2];
59 bcc.blend_constant_color[3] = brw->attribs.Color->BlendColor[3];
60
61 BRW_CACHED_BATCH_STRUCT(brw, &bcc);
62 }
63
64
65 const struct brw_tracked_state brw_blend_constant_color = {
66 .dirty = {
67 .mesa = _NEW_COLOR,
68 .brw = 0,
69 .cache = 0
70 },
71 .emit = upload_blend_constant_color
72 };
73
74 /**
75 * Upload the binding table pointers, which point each stage's array of surface
76 * state pointers.
77 *
78 * The binding table pointers are relative to the surface state base address,
79 * which is 0.
80 */
static void upload_binding_table_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;   /* the batch macros below expect 'intel' in scope */

   BEGIN_BATCH(6, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_BINDING_TABLE_PTRS << 16 | (6 - 2));
   /* Only the fragment (WM) stage gets a real binding table here; the
    * other stages' pointers are left as zero.
    */
   OUT_BATCH(0); /* vs */
   OUT_BATCH(0); /* gs */
   OUT_BATCH(0); /* clip */
   OUT_BATCH(0); /* sf */
   OUT_RELOC(brw->wm.bind_bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, 0);
   ADVANCE_BATCH();
}
94
95 const struct brw_tracked_state brw_binding_table_pointers = {
96 .dirty = {
97 .mesa = 0,
98 .brw = BRW_NEW_BATCH,
99 .cache = CACHE_NEW_SURF_BIND,
100 },
101 .emit = upload_binding_table_pointers,
102 };
103
104
105 /**
106 * Upload pointers to the per-stage state.
107 *
108 * The state pointers in this packet are all relative to the general state
109 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
110 */
static void upload_pipelined_state_pointers(struct brw_context *brw )
{
   struct intel_context *intel = &brw->intel;   /* the batch macros below expect 'intel' in scope */

   BEGIN_BATCH(7, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_PIPELINED_STATE_POINTERS << 16 | (7 - 2));
   OUT_RELOC(brw->vs.state_bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, 0);
   /* GS is optional: only point at its unit state while a GS program is
    * active.  NOTE(review): the final OUT_RELOC argument (1) is OR'd into
    * the relocated value — presumably the unit-enable bit; confirm
    * against the OUT_RELOC definition.
    */
   if (brw->gs.prog_active)
      OUT_RELOC(brw->gs.state_bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, 1);
   else
      OUT_BATCH(0);
   /* Metaops render without the clip stage. */
   if (!brw->metaops.active)
      OUT_RELOC(brw->clip.state_bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, 1);
   else
      OUT_BATCH(0);
   OUT_RELOC(brw->sf.state_bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, 0);
   OUT_RELOC(brw->wm.state_bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, 0);
   OUT_RELOC(brw->cc.state_bo, DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ, 0);
   ADVANCE_BATCH();

   /* Let dependent atoms know new pipelined state pointers were emitted. */
   brw->state.dirty.brw |= BRW_NEW_PSP;
}
133
134 #if 0
135 /* Combined into brw_psp_urb_cbs */
136 const struct brw_tracked_state brw_pipelined_state_pointers = {
137 .dirty = {
138 .mesa = 0,
139 .brw = BRW_NEW_METAOPS | BRW_NEW_BATCH,
140 .cache = (CACHE_NEW_VS_UNIT |
141 CACHE_NEW_GS_UNIT |
142 CACHE_NEW_GS_PROG |
143 CACHE_NEW_CLIP_UNIT |
144 CACHE_NEW_SF_UNIT |
145 CACHE_NEW_WM_UNIT |
146 CACHE_NEW_CC_UNIT)
147 },
148 .emit = upload_pipelined_state_pointers
149 };
150 #endif
151
/* Emit the pipelined state pointers together with the URB fence and the
 * constant buffer state (combined from the former
 * brw_pipelined_state_pointers atom — see the #if 0 block above).
 */
static void upload_psp_urb_cbs(struct brw_context *brw )
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_constant_buffer_state(brw);
}
158
159
160 const struct brw_tracked_state brw_psp_urb_cbs = {
161 .dirty = {
162 .mesa = 0,
163 .brw = BRW_NEW_URB_FENCE | BRW_NEW_METAOPS | BRW_NEW_BATCH,
164 .cache = (CACHE_NEW_VS_UNIT |
165 CACHE_NEW_GS_UNIT |
166 CACHE_NEW_GS_PROG |
167 CACHE_NEW_CLIP_UNIT |
168 CACHE_NEW_SF_UNIT |
169 CACHE_NEW_WM_UNIT |
170 CACHE_NEW_CC_UNIT)
171 },
172 .emit = upload_psp_urb_cbs,
173 };
174
175 /**
176 * Upload the depthbuffer offset and format.
177 *
178 * We have to do this per state validation as we need to emit the relocation
179 * in the batch buffer.
180 */
181
182 static int prepare_depthbuffer(struct brw_context *brw)
183 {
184 struct intel_region *region = brw->state.depth_region;
185
186 if (!region || !region->buffer)
187 return 0;
188 return dri_bufmgr_check_aperture_space(region->buffer);
189 }
190
191 static void emit_depthbuffer(struct brw_context *brw)
192 {
193 struct intel_context *intel = &brw->intel;
194 struct intel_region *region = brw->state.depth_region;
195 unsigned int len = BRW_IS_IGD(brw) ? sizeof(struct brw_depthbuffer_igd) / 4 : sizeof(struct brw_depthbuffer) / 4;
196
197 if (region == NULL) {
198 BEGIN_BATCH(len, IGNORE_CLIPRECTS);
199 OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
200 OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
201 (BRW_SURFACE_NULL << 29));
202 OUT_BATCH(0);
203 OUT_BATCH(0);
204 OUT_BATCH(0);
205
206 if (BRW_IS_IGD(brw))
207 OUT_BATCH(0);
208
209 ADVANCE_BATCH();
210 } else {
211 unsigned int format;
212
213 switch (region->cpp) {
214 case 2:
215 format = BRW_DEPTHFORMAT_D16_UNORM;
216 break;
217 case 4:
218 if (intel->depth_buffer_is_float)
219 format = BRW_DEPTHFORMAT_D32_FLOAT;
220 else
221 format = BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
222 break;
223 default:
224 assert(0);
225 return;
226 }
227
228 BEGIN_BATCH(len, IGNORE_CLIPRECTS);
229 OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
230 OUT_BATCH(((region->pitch * region->cpp) - 1) |
231 (format << 18) |
232 (BRW_TILEWALK_YMAJOR << 26) |
233 (region->tiled << 27) |
234 (BRW_SURFACE_2D << 29));
235 OUT_RELOC(region->buffer,
236 DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0);
237 OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1) |
238 ((region->pitch - 1) << 6) |
239 ((region->height - 1) << 19));
240 OUT_BATCH(0);
241
242 if (BRW_IS_IGD(brw))
243 OUT_BATCH(0);
244
245 ADVANCE_BATCH();
246 }
247 }
248
249 const struct brw_tracked_state brw_depthbuffer = {
250 .dirty = {
251 .mesa = 0,
252 .brw = BRW_NEW_DEPTH_BUFFER | BRW_NEW_BATCH,
253 .cache = 0,
254 },
255 .prepare = prepare_depthbuffer,
256 .emit = emit_depthbuffer,
257 };
258
259
260
261 /***********************************************************************
262 * Polygon stipple packet
263 */
264
265 static void upload_polygon_stipple(struct brw_context *brw)
266 {
267 struct brw_polygon_stipple bps;
268 GLuint i;
269
270 memset(&bps, 0, sizeof(bps));
271 bps.header.opcode = CMD_POLY_STIPPLE_PATTERN;
272 bps.header.length = sizeof(bps)/4-2;
273
274 for (i = 0; i < 32; i++)
275 bps.stipple[i] = brw->attribs.PolygonStipple[31 - i]; /* invert */
276
277 BRW_CACHED_BATCH_STRUCT(brw, &bps);
278 }
279
280 const struct brw_tracked_state brw_polygon_stipple = {
281 .dirty = {
282 .mesa = _NEW_POLYGONSTIPPLE,
283 .brw = 0,
284 .cache = 0
285 },
286 .emit = upload_polygon_stipple
287 };
288
289
290 /***********************************************************************
291 * Polygon stipple offset packet
292 */
293
/* Keep the 32x32 stipple pattern anchored to the window origin rather
 * than the screen origin as the window moves.
 */
static void upload_polygon_stipple_offset(struct brw_context *brw)
{
   __DRIdrawablePrivate *dPriv = brw->intel.driDrawable;
   struct brw_polygon_stipple_offset bpso;

   memset(&bpso, 0, sizeof(bpso));
   bpso.header.opcode = CMD_POLY_STIPPLE_OFFSET;
   bpso.header.length = sizeof(bpso)/4-2;

   /* The y term uses the window's bottom edge (y + h) since GL's origin
    * is the lower left.  NOTE(review): the (32 - (v & 31)) & 31 form
    * assumes dPriv->x / dPriv->y are non-negative — confirm.
    */
   bpso.bits0.x_offset = (32 - (dPriv->x & 31)) & 31;
   bpso.bits0.y_offset = (32 - ((dPriv->y + dPriv->h) & 31)) & 31;

   BRW_CACHED_BATCH_STRUCT(brw, &bpso);
}
308
/* Fake Mesa state flag — presumably raised when the drawable moves so
 * the stipple offset gets re-emitted.  NOTE(review): must not collide
 * with any real _NEW_* bit.
 */
#define _NEW_WINDOW_POS 0x40000000

const struct brw_tracked_state brw_polygon_stipple_offset = {
   .dirty = {
      .mesa = _NEW_WINDOW_POS,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_polygon_stipple_offset
};
319
320 /**********************************************************************
321 * AA Line parameters
322 */
323 static void upload_aa_line_parameters(struct brw_context *brw)
324 {
325 struct brw_aa_line_parameters balp;
326
327 if (!BRW_IS_IGD(brw))
328 return;
329
330 /* use legacy aa line coverage computation */
331 memset(&balp, 0, sizeof(balp));
332 balp.header.opcode = CMD_AA_LINE_PARAMETERS;
333 balp.header.length = sizeof(balp) / 4 - 2;
334
335 BRW_CACHED_BATCH_STRUCT(brw, &balp);
336 }
337
338 const struct brw_tracked_state brw_aa_line_parameters = {
339 .dirty = {
340 .mesa = 0,
341 .brw = BRW_NEW_CONTEXT,
342 .cache = 0
343 },
344 .emit = upload_aa_line_parameters
345 };
346
347 /***********************************************************************
348 * Line stipple packet
349 */
350
351 static void upload_line_stipple(struct brw_context *brw)
352 {
353 struct brw_line_stipple bls;
354 GLfloat tmp;
355 GLint tmpi;
356
357 memset(&bls, 0, sizeof(bls));
358 bls.header.opcode = CMD_LINE_STIPPLE_PATTERN;
359 bls.header.length = sizeof(bls)/4 - 2;
360
361 bls.bits0.pattern = brw->attribs.Line->StipplePattern;
362 bls.bits1.repeat_count = brw->attribs.Line->StippleFactor;
363
364 tmp = 1.0 / (GLfloat) brw->attribs.Line->StippleFactor;
365 tmpi = tmp * (1<<13);
366
367
368 bls.bits1.inverse_repeat_count = tmpi;
369
370 BRW_CACHED_BATCH_STRUCT(brw, &bls);
371 }
372
373 const struct brw_tracked_state brw_line_stipple = {
374 .dirty = {
375 .mesa = _NEW_LINE,
376 .brw = 0,
377 .cache = 0
378 },
379 .emit = upload_line_stipple
380 };
381
382
383
384 /***********************************************************************
385 * Misc constant state packets
386 */
387
/**
 * PIPE_CONTROL (instruction-cache flush) emission.
 *
 * Currently disabled: the function is deliberately a no-op.  The packet
 * construction is kept under "#if 0" for reference rather than as
 * unreachable code after an early return, which drew dead-code and
 * unused-variable warnings.
 */
static void upload_pipe_control(struct brw_context *brw)
{
   (void) brw;

#if 0
   struct brw_pipe_control pc;

   memset(&pc, 0, sizeof(pc));

   pc.header.opcode = CMD_PIPE_CONTROL;
   pc.header.length = sizeof(pc)/4 - 2;
   pc.header.post_sync_operation = PIPE_CONTROL_NOWRITE;

   pc.header.instruction_state_cache_flush_enable = 1;

   pc.bits1.dest_addr_type = PIPE_CONTROL_GTTWRITE_GLOBAL;

   BRW_BATCH_STRUCT(brw, &pc);
#endif
}
406
407 const struct brw_tracked_state brw_pipe_control = {
408 .dirty = {
409 .mesa = 0,
410 .brw = BRW_NEW_BATCH,
411 .cache = 0
412 },
413 .emit = upload_pipe_control
414 };
415
416
417 /***********************************************************************
 * Misc invariant state packets
419 */
420
/* Emit once-per-context invariant hardware setup: pipeline select,
 * global depth offset clamp, system instruction pointer, and vertex
 * fetch statistics enable.
 */
static void upload_invarient_state( struct brw_context *brw )
{
   {
      /* 0x61040000  Pipeline Select */
      /*     PipelineSelect            : 0 */
      /* NOTE(review): 0 presumably selects the 3D (not media) pipeline —
       * confirm against the PRM.
       */
      struct brw_pipeline_select ps;

      memset(&ps, 0, sizeof(ps));
      ps.header.opcode = CMD_PIPELINE_SELECT(brw);
      ps.header.pipeline_select = 0;
      BRW_BATCH_STRUCT(brw, &ps);
   }

   {
      struct brw_global_depth_offset_clamp gdo;
      memset(&gdo, 0, sizeof(gdo));

      /* Disable depth offset clamping.
       */
      gdo.header.opcode = CMD_GLOBAL_DEPTH_OFFSET_CLAMP;
      gdo.header.length = sizeof(gdo)/4 - 2;
      gdo.depth_offset_clamp = 0.0;

      BRW_BATCH_STRUCT(brw, &gdo);
   }


   /* 0x61020000  State Instruction Pointer */
   {
      struct brw_system_instruction_pointer sip;
      memset(&sip, 0, sizeof(sip));

      /* Point the system routine at offset 0 (none installed). */
      sip.header.opcode = CMD_STATE_INSN_POINTER;
      sip.header.length = 0;
      sip.bits0.pad = 0;
      sip.bits0.system_instruction_pointer = 0;
      BRW_BATCH_STRUCT(brw, &sip);
   }


   {
      struct brw_vf_statistics vfs;
      memset(&vfs, 0, sizeof(vfs));

      /* Only gather vertex-fetch statistics when stats debugging is on. */
      vfs.opcode = CMD_VF_STATISTICS(brw);
      if (INTEL_DEBUG & DEBUG_STATS)
	 vfs.statistics_enable = 1;

      BRW_BATCH_STRUCT(brw, &vfs);
   }
}
472
473 const struct brw_tracked_state brw_invarient_state = {
474 .dirty = {
475 .mesa = 0,
476 .brw = BRW_NEW_CONTEXT,
477 .cache = 0
478 },
479 .emit = upload_invarient_state
480 };
481
482 /**
483 * Define the base addresses which some state is referenced from.
484 *
485 * This allows us to avoid having to emit relocations in many places for
486 * cached state, and instead emit pointers inside of large, mostly-static
487 * state pools. This comes at the expense of memory, and more expensive cache
488 * misses.
489 */
static void upload_state_base_address( struct brw_context *brw )
{
   struct intel_context *intel = &brw->intel;   /* the batch macros below expect 'intel' in scope */

   /* Output the structure (brw_state_base_address) directly to the
    * batchbuffer, so we can emit relocations inline.
    */
   BEGIN_BATCH(6, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
   /* Every base address / bound is programmed as zero (see the comment
    * above the function).  NOTE(review): the low "1" bit in each dword
    * is presumably the modify-enable flag — confirm against the PRM.
    */
   OUT_BATCH(1); /* General state base address */
   OUT_BATCH(1); /* Surface state base address */
   OUT_BATCH(1); /* Indirect object base address */
   OUT_BATCH(1); /* General state upper bound */
   OUT_BATCH(1); /* Indirect object upper bound */
   ADVANCE_BATCH();
}
506
507 const struct brw_tracked_state brw_state_base_address = {
508 .dirty = {
509 .mesa = 0,
510 .brw = BRW_NEW_CONTEXT,
511 .cache = 0,
512 },
513 .emit = upload_state_base_address
514 };