/* From Mesa git (merge of 'origin/master' into the pipe-video branch):
 * src/mesa/drivers/dri/i965/brw_misc_state.c
 */
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33
34 #include "intel_batchbuffer.h"
35 #include "intel_regions.h"
36
37 #include "brw_context.h"
38 #include "brw_state.h"
39 #include "brw_defines.h"
40
/* Constant single cliprect for framebuffer object or DRI2 drawing.
 *
 * Emits a drawing rectangle covering the whole drawbuffer; with DRI2/FBO
 * rendering there is no window clipping, so one full-size rect suffices.
 */
static void upload_drawing_rect(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;   /* referenced by the batch macros */
   struct gl_context *ctx = &intel->ctx;

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_DRAWRECT_INFO_I965);
   OUT_BATCH(0); /* xmin, ymin */
   /* xmax, ymax: inclusive maxima, so width/height minus one. */
   OUT_BATCH(((ctx->DrawBuffer->Width - 1) & 0xffff) |
             ((ctx->DrawBuffer->Height - 1) << 16));
   OUT_BATCH(0); /* drawing rectangle origin */
   ADVANCE_BATCH();
}
55
/* Re-emit the drawing rectangle whenever the bound drawbuffer changes
 * (_NEW_BUFFERS) or a fresh context is created.
 */
const struct brw_tracked_state brw_drawing_rect = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_drawing_rect
};
64
/**
 * Upload the binding table pointers, which point each stage's array of surface
 * state pointers.
 *
 * The binding table pointers are relative to the surface state base address,
 * which points at the batchbuffer containing the streamed batch state.
 *
 * Pre-gen6 layout: one dword per fixed-function stage; only VS and WM have
 * binding tables here, the other stages are zero.
 */
static void upload_binding_table_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;   /* referenced by the batch macros */

   BEGIN_BATCH(6);
   OUT_BATCH(_3DSTATE_BINDING_TABLE_POINTERS << 16 | (6 - 2));
   OUT_BATCH(brw->vs.bind_bo_offset);
   OUT_BATCH(0); /* gs */
   OUT_BATCH(0); /* clip */
   OUT_BATCH(0); /* sf */
   OUT_BATCH(brw->wm.bind_bo_offset);
   ADVANCE_BATCH();
}
85
/* Binding tables live in the batch, so re-emit on a new batch or when the
 * binding tables themselves are rebuilt.
 */
const struct brw_tracked_state brw_binding_table_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_BINDING_TABLE,
      .cache = 0,
   },
   .emit = upload_binding_table_pointers,
};
94
/**
 * Upload the binding table pointers, which point each stage's array of surface
 * state pointers.
 *
 * The binding table pointers are relative to the surface state base address,
 * which points at the batchbuffer containing the streamed batch state.
 *
 * Gen6 variant: the header carries per-stage "modify" bits and the body is
 * three dwords (VS, GS, PS) instead of the pre-gen6 five.
 */
static void upload_gen6_binding_table_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;   /* referenced by the batch macros */

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_BINDING_TABLE_POINTERS << 16 |
             GEN6_BINDING_TABLE_MODIFY_VS |
             GEN6_BINDING_TABLE_MODIFY_GS |
             GEN6_BINDING_TABLE_MODIFY_PS |
             (4 - 2));
   OUT_BATCH(brw->vs.bind_bo_offset); /* vs */
   OUT_BATCH(0); /* gs */
   OUT_BATCH(brw->wm.bind_bo_offset); /* wm/ps */
   ADVANCE_BATCH();
}
117
/* Gen6 counterpart of brw_binding_table_pointers; same dirty conditions. */
const struct brw_tracked_state gen6_binding_table_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_BINDING_TABLE,
      .cache = 0,
   },
   .emit = upload_gen6_binding_table_pointers,
};
126
/**
 * Upload pointers to the per-stage state.
 *
 * The state pointers in this packet are all relative to the general state
 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
 */
static void upload_pipelined_state_pointers(struct brw_context *brw )
{
   struct intel_context *intel = &brw->intel;

   if (intel->gen == 5) {
      /* Need to flush before changing clip max threads for errata. */
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }

   BEGIN_BATCH(7);
   OUT_BATCH(_3DSTATE_PIPELINED_POINTERS << 16 | (7 - 2));
   OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->vs.state_offset);
   /* NOTE(review): the low bit OR'd into the GS/clip pointers looks like the
    * stage-enable flag (set for clip always, for GS only when a GS program
    * is active) -- confirm against the PIPELINED_POINTERS packet definition.
    */
   if (brw->gs.prog_active)
      OUT_RELOC(brw->intel.batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
                brw->gs.state_offset | 1);
   else
      OUT_BATCH(0);
   OUT_RELOC(brw->intel.batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->clip.state_offset | 1);
   OUT_RELOC(brw->intel.batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->sf.state_offset);
   OUT_RELOC(brw->intel.batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->wm.state_offset);
   OUT_RELOC(brw->intel.batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->cc.state_offset);
   ADVANCE_BATCH();

   /* The unit state lives in the batchbuffer, so anything depending on the
    * pipelined state pointers must be re-emitted too.
    */
   brw->state.dirty.brw |= BRW_NEW_PSP;
}
165
/* Emit the pipelined state pointers, then the URB fence and CS URB state.
 * NOTE(review): the ordering of these three calls looks deliberate -- the
 * URB layout follows from the unit state just pointed at; keep the order.
 */
static void upload_psp_urb_cbs(struct brw_context *brw )
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_cs_urb_state(brw);
}
172
/* Re-emit PSP/URB/CS state when any per-stage unit state is rebuilt, the URB
 * configuration changes, or a new batch begins.
 */
const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_URB_FENCE | BRW_NEW_BATCH,
      .cache = (CACHE_NEW_VS_UNIT |
                CACHE_NEW_GS_UNIT |
                CACHE_NEW_GS_PROG |
                CACHE_NEW_CLIP_UNIT |
                CACHE_NEW_SF_UNIT |
                CACHE_NEW_WM_UNIT |
                CACHE_NEW_CC_UNIT)
   },
   .emit = upload_psp_urb_cbs,
};
187
188 static void prepare_depthbuffer(struct brw_context *brw)
189 {
190 struct intel_region *region = brw->state.depth_region;
191
192 if (region != NULL)
193 brw_add_validated_bo(brw, region->buffer);
194 }
195
/* Emit the _3DSTATE_DEPTH_BUFFER packet describing the bound depth region,
 * or a null depth surface when no region is bound, followed on gen6+ by a
 * zeroed _3DSTATE_CLEAR_PARAMS.
 */
static void emit_depthbuffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct intel_region *region = brw->state.depth_region;
   unsigned int len;

   /* The packet grew extra dwords over the hardware generations. */
   if (intel->gen >= 6)
      len = 7;
   else if (intel->is_g4x || intel->gen == 5)
      len = 6;
   else
      len = 5;

   if (region == NULL) {
      /* No depth buffer: emit a null surface so depth accesses go nowhere. */
      BEGIN_BATCH(len);
      OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
                (BRW_SURFACE_NULL << 29));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);

      if (intel->is_g4x || intel->gen >= 5)
         OUT_BATCH(0);

      if (intel->gen >= 6)
         OUT_BATCH(0);

      ADVANCE_BATCH();
   } else {
      unsigned int format;

      /* Pick the hardware depth format from the region's bytes per pixel. */
      switch (region->cpp) {
      case 2:
         format = BRW_DEPTHFORMAT_D16_UNORM;
         break;
      case 4:
         if (intel->depth_buffer_is_float)
            format = BRW_DEPTHFORMAT_D32_FLOAT;
         else
            format = BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
         break;
      default:
         assert(0);
         return;
      }

      /* X-tiled depth is never emitted here, and gen6+ requires Y tiling. */
      assert(region->tiling != I915_TILING_X);
      assert(intel->gen < 6 || region->tiling == I915_TILING_Y);

      BEGIN_BATCH(len);
      OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH(((region->pitch * region->cpp) - 1) |   /* pitch in bytes, minus one */
                (format << 18) |
                (BRW_TILEWALK_YMAJOR << 26) |
                ((region->tiling != I915_TILING_NONE) << 27) |
                (BRW_SURFACE_2D << 29));
      OUT_RELOC(region->buffer,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                0);
      OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1) |
                ((region->width - 1) << 6) |
                ((region->height - 1) << 19));
      OUT_BATCH(0);

      if (intel->is_g4x || intel->gen >= 5)
         OUT_BATCH(0);

      if (intel->gen >= 6)
         OUT_BATCH(0);

      ADVANCE_BATCH();
   }

   /* Initialize it for safety. */
   if (intel->gen >= 6) {
      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_CLEAR_PARAMS << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}
278
/**
 * \see brw_context.state.depth_region
 *
 * Re-emitted when the depth buffer binding changes or on a new batch.
 */
const struct brw_tracked_state brw_depthbuffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_DEPTH_BUFFER | BRW_NEW_BATCH,
      .cache = 0,
   },
   .prepare = prepare_depthbuffer,
   .emit = emit_depthbuffer,
};
291
292
293
294 /***********************************************************************
295 * Polygon stipple packet
296 */
297
/* Upload the 32x32 polygon stipple pattern, flipped vertically when
 * rendering to the window system framebuffer.
 */
static void upload_polygon_stipple(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;   /* referenced by the batch macros */
   struct gl_context *ctx = &brw->intel.ctx;
   GLuint i;

   /* Skip the packet entirely when stippling is disabled. */
   if (!ctx->Polygon.StippleFlag)
      return;

   BEGIN_BATCH(33);
   OUT_BATCH(_3DSTATE_POLY_STIPPLE_PATTERN << 16 | (33 - 2));

   /* Polygon stipple is provided in OpenGL order, i.e. bottom
    * row first. If we're rendering to a window (i.e. the
    * default frame buffer object, 0), then we need to invert
    * it to match our pixel layout. But if we're rendering
    * to a FBO (i.e. any named frame buffer object), we *don't*
    * need to invert - we already match the layout.
    */
   if (ctx->DrawBuffer->Name == 0) {
      for (i = 0; i < 32; i++)
         OUT_BATCH(ctx->PolygonStipple[31 - i]); /* invert */
   }
   else {
      for (i = 0; i < 32; i++)
         OUT_BATCH(ctx->PolygonStipple[i]);
   }
   CACHED_BATCH();
}
327
/* Re-emit the stipple pattern when the GL stipple state changes or on a
 * fresh context.
 */
const struct brw_tracked_state brw_polygon_stipple = {
   .dirty = {
      .mesa = _NEW_POLYGONSTIPPLE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_polygon_stipple
};
336
337
338 /***********************************************************************
339 * Polygon stipple offset packet
340 */
341
/* Upload the polygon stipple Y offset so the pattern stays anchored to
 * window coordinates when rendering to the system framebuffer.
 */
static void upload_polygon_stipple_offset(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;   /* referenced by the batch macros */
   struct gl_context *ctx = &brw->intel.ctx;

   if (!ctx->Polygon.StippleFlag)
      return;

   BEGIN_BATCH(2);
   OUT_BATCH(_3DSTATE_POLY_STIPPLE_OFFSET << 16 | (2-2));

   /* If we're drawing to a system window (ctx->DrawBuffer->Name == 0),
    * we have to invert the Y axis in order to match the OpenGL
    * pixel coordinate system, and our offset must be matched
    * to the window position. If we're drawing to a FBO
    * (ctx->DrawBuffer->Name != 0), then our native pixel coordinate
    * system works just fine, and there's no window system to
    * worry about.
    */
   if (brw->intel.ctx.DrawBuffer->Name == 0)
      OUT_BATCH((32 - (ctx->DrawBuffer->Height & 31)) & 31);
   else
      OUT_BATCH(0);
   CACHED_BATCH();
}
367
/* Driver-private mesa dirty bit signalling a window-position change.
 * NOTE(review): presumably chosen not to collide with core _NEW_* flags --
 * confirm against the flag values in mtypes.h.
 */
#define _NEW_WINDOW_POS 0x40000000

const struct brw_tracked_state brw_polygon_stipple_offset = {
   .dirty = {
      .mesa = _NEW_WINDOW_POS | _NEW_POLYGONSTIPPLE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_polygon_stipple_offset
};
378
379 /**********************************************************************
380 * AA Line parameters
381 */
382 static void upload_aa_line_parameters(struct brw_context *brw)
383 {
384 struct intel_context *intel = &brw->intel;
385 struct gl_context *ctx = &brw->intel.ctx;
386
387 if (!ctx->Line.SmoothFlag || !brw->has_aa_line_parameters)
388 return;
389
390 OUT_BATCH(_3DSTATE_AA_LINE_PARAMETERS << 16 | (3 - 2));
391 /* use legacy aa line coverage computation */
392 OUT_BATCH(0);
393 OUT_BATCH(0);
394 CACHED_BATCH();
395 }
396
/* Re-emit AA line parameters when GL line state changes or on a fresh
 * context.
 */
const struct brw_tracked_state brw_aa_line_parameters = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_aa_line_parameters
};
405
406 /***********************************************************************
407 * Line stipple packet
408 */
409
/* Upload the line stipple pattern and repeat factor.  Skipped when line
 * stippling is disabled.
 */
static void upload_line_stipple(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;   /* referenced by the batch macros */
   struct gl_context *ctx = &brw->intel.ctx;
   GLfloat tmp;
   GLint tmpi;

   if (!ctx->Line.StippleFlag)
      return;

   BEGIN_BATCH(3);
   OUT_BATCH(_3DSTATE_LINE_STIPPLE_PATTERN << 16 | (3 - 2));
   OUT_BATCH(ctx->Line.StipplePattern);
   /* The high half of the last dword is the reciprocal of the stipple
    * factor scaled by 2^13, alongside the raw factor in the low half.
    * NOTE(review): the 13-bit fixed-point scale should match the
    * LINE_STIPPLE packet definition for this generation -- confirm.
    */
   tmp = 1.0 / (GLfloat) ctx->Line.StippleFactor;
   tmpi = tmp * (1<<13);
   OUT_BATCH(tmpi << 16 | ctx->Line.StippleFactor);
   CACHED_BATCH();
}
428
/* Re-emit the stipple pattern when GL line state changes or on a fresh
 * context.
 */
const struct brw_tracked_state brw_line_stipple = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_line_stipple
};
437
438
439 /***********************************************************************
440 * Misc invarient state packets
441 */
442
/* Emit state that is set once per context and never varies afterwards:
 * pipeline select, global depth offset clamp (pre-gen6), multisample
 * defaults and SVB index resets (gen6+), the system instruction pointer,
 * and the vertex-fetch statistics enable.
 */
static void upload_invarient_state( struct brw_context *brw )
{
   struct intel_context *intel = &brw->intel;

   {
      /* 0x61040000 Pipeline Select */
      /* PipelineSelect : 0 */
      struct brw_pipeline_select ps;

      memset(&ps, 0, sizeof(ps));
      ps.header.opcode = brw->CMD_PIPELINE_SELECT;
      ps.header.pipeline_select = 0;   /* 0 selects the 3D pipeline */
      BRW_BATCH_STRUCT(brw, &ps);
   }

   if (intel->gen < 6) {
      struct brw_global_depth_offset_clamp gdo;
      memset(&gdo, 0, sizeof(gdo));

      /* Disable depth offset clamping.
       */
      gdo.header.opcode = _3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP;
      gdo.header.length = sizeof(gdo)/4 - 2;
      gdo.depth_offset_clamp = 0.0;

      BRW_BATCH_STRUCT(brw, &gdo);
   }

   if (intel->gen >= 6) {
      int i;

      /* Single-sampled rendering with the sample at the pixel center. */
      BEGIN_BATCH(3);
      OUT_BATCH(_3DSTATE_MULTISAMPLE << 16 | (3 - 2));
      OUT_BATCH(MS_PIXEL_LOCATION_CENTER |
                MS_NUMSAMPLES_1);
      OUT_BATCH(0); /* positions for 4/8-sample */
      ADVANCE_BATCH();

      /* Sample mask: enable sample 0 only. */
      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_SAMPLE_MASK << 16 | (2 - 2));
      OUT_BATCH(1);
      ADVANCE_BATCH();

      /* Reset all four streamed-vertex-buffer indices.
       * NOTE(review): 0xffffffff looks like the maximum-index field of
       * GS_SVB_INDEX -- confirm against the packet definition.
       */
      for (i = 0; i < 4; i++) {
         BEGIN_BATCH(4);
         OUT_BATCH(_3DSTATE_GS_SVB_INDEX << 16 | (4 - 2));
         OUT_BATCH(i << SVB_INDEX_SHIFT);
         OUT_BATCH(0);
         OUT_BATCH(0xffffffff);
         ADVANCE_BATCH();
      }
   }

   /* 0x61020000 State Instruction Pointer */
   {
      struct brw_system_instruction_pointer sip;
      memset(&sip, 0, sizeof(sip));

      sip.header.opcode = CMD_STATE_INSN_POINTER;
      sip.header.length = 0;
      sip.bits0.pad = 0;
      sip.bits0.system_instruction_pointer = 0;
      BRW_BATCH_STRUCT(brw, &sip);
   }


   {
      struct brw_vf_statistics vfs;
      memset(&vfs, 0, sizeof(vfs));

      vfs.opcode = brw->CMD_VF_STATISTICS;
      /* Vertex-fetch statistics gathering is only enabled when debugging. */
      if (unlikely(INTEL_DEBUG & DEBUG_STATS))
         vfs.statistics_enable = 1;

      BRW_BATCH_STRUCT(brw, &vfs);
   }
}
520
/* Invariant state is emitted exactly once, when the context is created. */
const struct brw_tracked_state brw_invarient_state = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_invarient_state
};
529
/**
 * Define the base addresses which some state is referenced from.
 *
 * This allows us to avoid having to emit relocations for the objects,
 * and is actually required for binding table pointers on gen6.
 *
 * Surface state base address covers binding table pointers and
 * surface state objects, but not the surfaces that the surface state
 * objects point to.
 *
 * NOTE(review): the bare OUT_BATCH(1) dwords appear to encode "address 0
 * with the modify-enable bit set" (addresses are 4K-aligned, so the low
 * bit is the enable) -- confirm against the STATE_BASE_ADDRESS definition.
 */
static void upload_state_base_address( struct brw_context *brw )
{
   struct intel_context *intel = &brw->intel;

   if (intel->gen >= 6) {
      BEGIN_BATCH(10);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
      /* General state base address: stateless DP read/write requests */
      OUT_BATCH(1);
      /* Surface state base address:
       * BINDING_TABLE_STATE
       * SURFACE_STATE
       */
      OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0, 1);
      /* Dynamic state base address:
       * SAMPLER_STATE
       * SAMPLER_BORDER_COLOR_STATE
       * CLIP, SF, WM/CC viewport state
       * COLOR_CALC_STATE
       * DEPTH_STENCIL_STATE
       * BLEND_STATE
       * Push constants (when INSTPM: CONSTANT_BUFFER Address Offset
       * Disable is clear, which we rely on)
       */
      OUT_RELOC(intel->batch.bo, (I915_GEM_DOMAIN_RENDER |
                                  I915_GEM_DOMAIN_INSTRUCTION), 0, 1);

      OUT_BATCH(1); /* Indirect object base address: MEDIA_OBJECT data */
      OUT_BATCH(1); /* Instruction base address: shader kernels (incl. SIP) */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Dynamic state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else if (intel->gen == 5) {
      BEGIN_BATCH(8);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
                1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* Instruction base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(6);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
                1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      ADVANCE_BATCH();
   }
}
598
/* Base addresses point into the batchbuffer, so they must be re-emitted at
 * the start of every new batch.
 */
const struct brw_tracked_state brw_state_base_address = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH,
      .cache = 0,
   },
   .emit = upload_state_base_address
};