/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */


#include "intel_batchbuffer.h"
#include "intel_regions.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"

/* Constant single cliprect for framebuffer object or DRI2 drawing */
static void upload_drawing_rect(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;

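   /* DW2 holds the inclusive maximum coordinates: ymax in bits 31:16 and
    * xmax in bits 15:0.  For example, a 640x480 drawbuffer gives
    * ((480 - 1) << 16) | ((640 - 1) & 0xffff) = 0x01df027f.
    */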
   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_DRAWRECT_INFO_I965);
   OUT_BATCH(0); /* xmin, ymin */
   OUT_BATCH(((ctx->DrawBuffer->Width - 1) & 0xffff) |
             ((ctx->DrawBuffer->Height - 1) << 16));
   OUT_BATCH(0);
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_drawing_rect = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_drawing_rect
};

/**
 * Upload the binding table pointers, which point to each stage's array of
 * surface state pointers.
 *
 * The binding table pointers are relative to the surface state base address,
 * which points at the batchbuffer containing the streamed batch state.
 */
static void upload_binding_table_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

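   /* Only the VS and WM stages have binding tables here; the GS, clip, and
    * SF stages don't source surfaces in this driver, so their pointers are
    * left at zero.
    */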
   BEGIN_BATCH(6);
   OUT_BATCH(CMD_BINDING_TABLE_PTRS << 16 | (6 - 2));
   OUT_BATCH(brw->vs.bind_bo_offset);
   OUT_BATCH(0); /* gs */
   OUT_BATCH(0); /* clip */
   OUT_BATCH(0); /* sf */
   OUT_BATCH(brw->wm.bind_bo_offset);
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_binding_table_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_BINDING_TABLE,
      .cache = 0,
   },
   .emit = upload_binding_table_pointers,
};

/**
 * Upload the gen6 binding table pointers, which point to each stage's array
 * of surface state pointers.
 *
 * The binding table pointers are relative to the surface state base address,
 * which points at the batchbuffer containing the streamed batch state.
 */
static void upload_gen6_binding_table_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

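   /* The gen6 header carries per-stage modify-enable bits, so only the
    * stages flagged below (VS, GS, PS) have their pointers updated.
    */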
   BEGIN_BATCH(4);
   OUT_BATCH(CMD_BINDING_TABLE_PTRS << 16 |
             GEN6_BINDING_TABLE_MODIFY_VS |
             GEN6_BINDING_TABLE_MODIFY_GS |
             GEN6_BINDING_TABLE_MODIFY_PS |
             (4 - 2));
   OUT_BATCH(brw->vs.bind_bo_offset); /* vs */
   OUT_BATCH(0); /* gs */
   OUT_BATCH(brw->wm.bind_bo_offset); /* wm/ps */
   ADVANCE_BATCH();
}

const struct brw_tracked_state gen6_binding_table_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_BINDING_TABLE,
      .cache = 0,
   },
   .emit = upload_gen6_binding_table_pointers,
};

/**
 * Upload pointers to the per-stage state.
 *
 * The state pointers in this packet are all relative to the general state
 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
 */
static void upload_pipelined_state_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   if (intel->gen == 5) {
      /* Need to flush before changing clip max threads for errata. */
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }

   BEGIN_BATCH(7);
   OUT_BATCH(CMD_PIPELINED_STATE_POINTERS << 16 | (7 - 2));
   OUT_RELOC(brw->vs.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   if (brw->gs.prog_active)
      OUT_RELOC(brw->gs.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
   else
      OUT_BATCH(0);
   OUT_RELOC(brw->clip.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
   OUT_RELOC(brw->sf.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   OUT_RELOC(brw->wm.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   OUT_RELOC(brw->cc.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->cc.state_offset);
   ADVANCE_BATCH();

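   /* Signal the atoms that depend on BRW_NEW_PSP that the unit state
    * pointers just moved.
    */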
   brw->state.dirty.brw |= BRW_NEW_PSP;
}

static void prepare_psp_urb_cbs(struct brw_context *brw)
{
   brw_add_validated_bo(brw, brw->vs.state_bo);
   brw_add_validated_bo(brw, brw->gs.state_bo);
   brw_add_validated_bo(brw, brw->clip.state_bo);
   brw_add_validated_bo(brw, brw->sf.state_bo);
   brw_add_validated_bo(brw, brw->wm.state_bo);
}

static void upload_psp_urb_cbs(struct brw_context *brw)
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_cs_urb_state(brw);
}

const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_URB_FENCE | BRW_NEW_BATCH,
      .cache = (CACHE_NEW_VS_UNIT |
                CACHE_NEW_GS_UNIT |
                CACHE_NEW_GS_PROG |
                CACHE_NEW_CLIP_UNIT |
                CACHE_NEW_SF_UNIT |
                CACHE_NEW_WM_UNIT |
                CACHE_NEW_CC_UNIT)
   },
   .prepare = prepare_psp_urb_cbs,
   .emit = upload_psp_urb_cbs,
};

static void prepare_depthbuffer(struct brw_context *brw)
{
   struct intel_region *region = brw->state.depth_region;

   if (region != NULL)
      brw_add_validated_bo(brw, region->buffer);
}

static void emit_depthbuffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct intel_region *region = brw->state.depth_region;
   unsigned int len;

   if (intel->gen >= 6)
      len = 7;
   else if (intel->is_g4x || intel->gen == 5)
      len = 6;
   else
      len = 5;

   if (region == NULL) {
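      /* No depth buffer bound: emit a null depth surface (with a
       * D32_FLOAT format) so that depth and stencil accesses are
       * harmlessly discarded.
       */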
      BEGIN_BATCH(len);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
                (BRW_SURFACE_NULL << 29));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);

      if (intel->is_g4x || intel->gen >= 5)
         OUT_BATCH(0);

      if (intel->gen >= 6)
         OUT_BATCH(0);

      ADVANCE_BATCH();
   } else {
      unsigned int format;

      switch (region->cpp) {
      case 2:
         format = BRW_DEPTHFORMAT_D16_UNORM;
         break;
      case 4:
         if (intel->depth_buffer_is_float)
            format = BRW_DEPTHFORMAT_D32_FLOAT;
         else
            format = BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
         break;
      default:
         assert(0);
         return;
      }

      assert(region->tiling != I915_TILING_X);
      if (intel->gen >= 6)
         assert(region->tiling != I915_TILING_NONE);

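      /* DW1 packs the surface pitch in bytes minus one in the low bits,
       * the depth format at bit 18, Y-major tile walk at bit 26, a tiled
       * flag at bit 27, and the surface type at bit 29.  region->pitch is
       * in pixels, hence the multiply by cpp.
       */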
      BEGIN_BATCH(len);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH(((region->pitch * region->cpp) - 1) |
                (format << 18) |
                (BRW_TILEWALK_YMAJOR << 26) |
                ((region->tiling != I915_TILING_NONE) << 27) |
                (BRW_SURFACE_2D << 29));
      OUT_RELOC(region->buffer,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                0);
      OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1) |
                ((region->width - 1) << 6) |
                ((region->height - 1) << 19));
      OUT_BATCH(0);

      if (intel->is_g4x || intel->gen >= 5)
         OUT_BATCH(0);

      if (intel->gen >= 6)
         OUT_BATCH(0);

      ADVANCE_BATCH();
   }

   /* Initialize the depth clear value (3DSTATE_CLEAR_PARAMS) for safety. */
   if (intel->gen >= 6) {
      BEGIN_BATCH(2);
      OUT_BATCH(CMD_3D_CLEAR_PARAMS << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}

const struct brw_tracked_state brw_depthbuffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_DEPTH_BUFFER | BRW_NEW_BATCH,
      .cache = 0,
   },
   .prepare = prepare_depthbuffer,
   .emit = emit_depthbuffer,
};


/***********************************************************************
 * Polygon stipple packet
 */

static void upload_polygon_stipple(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct brw_polygon_stipple bps;
   GLuint i;

   if (!ctx->Polygon.StippleFlag)
      return;

   memset(&bps, 0, sizeof(bps));
   bps.header.opcode = CMD_POLY_STIPPLE_PATTERN;
   bps.header.length = sizeof(bps) / 4 - 2;

   /* Polygon stipple is provided in OpenGL order, i.e. bottom
    * row first.  If we're rendering to a window (i.e. the
    * default frame buffer object, 0), then we need to invert
    * it to match our pixel layout.  But if we're rendering
    * to a FBO (i.e. any named frame buffer object), we *don't*
    * need to invert - we already match the layout.
    */
   if (ctx->DrawBuffer->Name == 0) {
      for (i = 0; i < 32; i++)
         bps.stipple[i] = ctx->PolygonStipple[31 - i]; /* invert */
   }
   else {
      for (i = 0; i < 32; i++)
         bps.stipple[i] = ctx->PolygonStipple[i]; /* don't invert */
   }

   BRW_CACHED_BATCH_STRUCT(brw, &bps);
}

const struct brw_tracked_state brw_polygon_stipple = {
   .dirty = {
      .mesa = _NEW_POLYGONSTIPPLE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_polygon_stipple
};


/***********************************************************************
 * Polygon stipple offset packet
 */

static void upload_polygon_stipple_offset(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct brw_polygon_stipple_offset bpso;

   if (!ctx->Polygon.StippleFlag)
      return;

   memset(&bpso, 0, sizeof(bpso));
   bpso.header.opcode = CMD_POLY_STIPPLE_OFFSET;
   bpso.header.length = sizeof(bpso) / 4 - 2;

   /* If we're drawing to a system window (ctx->DrawBuffer->Name == 0),
    * we have to invert the Y axis in order to match the OpenGL
    * pixel coordinate system, and our offset must be matched
    * to the window position.  If we're drawing to a FBO
    * (ctx->DrawBuffer->Name != 0), then our native pixel coordinate
    * system works just fine, and there's no window system to
    * worry about.
    */
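   /* For example, a 300-pixel-tall window gets a y offset of
    * (32 - (300 & 31)) & 31 = (32 - 12) & 31 = 20, realigning the 32x32
    * stipple pattern with the window's lower-left origin.
    */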
   if (brw->intel.ctx.DrawBuffer->Name == 0) {
      bpso.bits0.x_offset = 0;
      bpso.bits0.y_offset = (32 - (ctx->DrawBuffer->Height & 31)) & 31;
   }
   else {
      bpso.bits0.y_offset = 0;
      bpso.bits0.x_offset = 0;
   }

   BRW_CACHED_BATCH_STRUCT(brw, &bpso);
}

#define _NEW_WINDOW_POS 0x40000000

const struct brw_tracked_state brw_polygon_stipple_offset = {
   .dirty = {
      .mesa = _NEW_WINDOW_POS | _NEW_POLYGONSTIPPLE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_polygon_stipple_offset
};

/**********************************************************************
 * AA Line parameters
 */
static void upload_aa_line_parameters(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct brw_aa_line_parameters balp;

   if (!ctx->Line.SmoothFlag || !brw->has_aa_line_parameters)
      return;

   /* Use the legacy AA line coverage computation. */
   memset(&balp, 0, sizeof(balp));
   balp.header.opcode = CMD_AA_LINE_PARAMETERS;
   balp.header.length = sizeof(balp) / 4 - 2;

   BRW_CACHED_BATCH_STRUCT(brw, &balp);
}

const struct brw_tracked_state brw_aa_line_parameters = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_aa_line_parameters
};

/***********************************************************************
 * Line stipple packet
 */

static void upload_line_stipple(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct brw_line_stipple bls;
   GLfloat tmp;
   GLint tmpi;

   if (!ctx->Line.StippleFlag)
      return;

   memset(&bls, 0, sizeof(bls));
   bls.header.opcode = CMD_LINE_STIPPLE_PATTERN;
   bls.header.length = sizeof(bls) / 4 - 2;

   bls.bits0.pattern = ctx->Line.StipplePattern;
   bls.bits1.repeat_count = ctx->Line.StippleFactor;

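   /* The inverse repeat count is a fixed-point value with 13 fractional
    * bits.  For example, a stipple factor of 3 gives
    * (1.0 / 3) * (1 << 13) ~= 2730.
    */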
   tmp = 1.0 / (GLfloat) ctx->Line.StippleFactor;
   tmpi = tmp * (1 << 13);

   bls.bits1.inverse_repeat_count = tmpi;

   BRW_CACHED_BATCH_STRUCT(brw, &bls);
}

const struct brw_tracked_state brw_line_stipple = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_line_stipple
};


/***********************************************************************
 * Misc invariant state packets
 */

static void upload_invarient_state(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   {
      /* 0x61040000  Pipeline Select */
      /* PipelineSelect            : 0 (3D pipeline) */
      struct brw_pipeline_select ps;

      memset(&ps, 0, sizeof(ps));
      ps.header.opcode = brw->CMD_PIPELINE_SELECT;
      ps.header.pipeline_select = 0;
      BRW_BATCH_STRUCT(brw, &ps);
   }

   if (intel->gen < 6) {
      struct brw_global_depth_offset_clamp gdo;
      memset(&gdo, 0, sizeof(gdo));

      /* Disable depth offset clamping. */
      gdo.header.opcode = CMD_GLOBAL_DEPTH_OFFSET_CLAMP;
      gdo.header.length = sizeof(gdo) / 4 - 2;
      gdo.depth_offset_clamp = 0.0;

      BRW_BATCH_STRUCT(brw, &gdo);
   }

   if (intel->gen >= 6) {
      int i;

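      /* Program non-MSAA rendering: one sample per pixel, located at the
       * pixel center, with only sample 0 enabled in the sample mask.
       */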
      BEGIN_BATCH(3);
      OUT_BATCH(_3DSTATE_MULTISAMPLE << 16 | (3 - 2));
      OUT_BATCH(MS_PIXEL_LOCATION_CENTER |
                MS_NUMSAMPLES_1);
      OUT_BATCH(0); /* positions for 4/8-sample */
      ADVANCE_BATCH();

      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_SAMPLE_MASK << 16 | (2 - 2));
      OUT_BATCH(1);
      ADVANCE_BATCH();

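      /* Initialize the four SVB (streamed vertex buffer) index registers:
       * each starts at index 0 with a maximum index of 0xffffffff.
       */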
      for (i = 0; i < 4; i++) {
         BEGIN_BATCH(4);
         OUT_BATCH(CMD_GS_SVB_INDEX << 16 | (4 - 2));
         OUT_BATCH(i << SVB_INDEX_SHIFT);
         OUT_BATCH(0);
         OUT_BATCH(0xffffffff);
         ADVANCE_BATCH();
      }
   }

   /* 0x61020000  State Instruction Pointer */
   {
      struct brw_system_instruction_pointer sip;
      memset(&sip, 0, sizeof(sip));

      sip.header.opcode = CMD_STATE_INSN_POINTER;
      sip.header.length = 0;
      sip.bits0.pad = 0;
      sip.bits0.system_instruction_pointer = 0;
      BRW_BATCH_STRUCT(brw, &sip);
   }

   {
      struct brw_vf_statistics vfs;
      memset(&vfs, 0, sizeof(vfs));

      vfs.opcode = brw->CMD_VF_STATISTICS;
      if (unlikely(INTEL_DEBUG & DEBUG_STATS))
         vfs.statistics_enable = 1;

      BRW_BATCH_STRUCT(brw, &vfs);
   }
}

const struct brw_tracked_state brw_invarient_state = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_invarient_state
};

/**
 * Define the base addresses that various pieces of state are referenced from.
 *
 * This allows us to avoid having to emit relocations for the objects,
 * and is actually required for binding table pointers on gen6.
 *
 * Surface state base address covers binding table pointers and
 * surface state objects, but not the surfaces that the surface state
 * objects point to.
 */
static void upload_state_base_address(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

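   /* Bit 0 of each address (and bound) dword below is its modify-enable
    * bit, so OUT_BATCH(1) latches a base address (or bound) of zero.
    */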
   if (intel->gen >= 6) {
      BEGIN_BATCH(10);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(intel->batch->buf, I915_GEM_DOMAIN_SAMPLER, 0,
                1); /* Surface state base address */
      OUT_BATCH(1); /* Dynamic state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* Instruction base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Dynamic state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else if (intel->gen == 5) {
      BEGIN_BATCH(8);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(intel->batch->buf, I915_GEM_DOMAIN_SAMPLER, 0,
                1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* Instruction base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(6);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(intel->batch->buf, I915_GEM_DOMAIN_SAMPLER, 0,
                1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      ADVANCE_BATCH();
   }
}

const struct brw_tracked_state brw_state_base_address = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH,
      .cache = 0,
   },
   .emit = upload_state_base_address
};