i965: Hook up remaining Sandybridge state packets besides WM.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_misc_state.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */


#include "intel_batchbuffer.h"
#include "intel_regions.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"


/***********************************************************************
 * Blend color
 */

static void upload_blend_constant_color(struct brw_context *brw)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct brw_blend_constant_color bcc;

   memset(&bcc, 0, sizeof(bcc));
   bcc.header.opcode = CMD_BLEND_CONSTANT_COLOR;
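   /* As throughout this file, a packet header's length field is the total
    * dword count minus 2 (cf. the explicit "(n - 2)" encodings below).
    */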
   bcc.header.length = sizeof(bcc)/4-2;
   bcc.blend_constant_color[0] = ctx->Color.BlendColor[0];
   bcc.blend_constant_color[1] = ctx->Color.BlendColor[1];
   bcc.blend_constant_color[2] = ctx->Color.BlendColor[2];
   bcc.blend_constant_color[3] = ctx->Color.BlendColor[3];

   BRW_CACHED_BATCH_STRUCT(brw, &bcc);
}


const struct brw_tracked_state brw_blend_constant_color = {
   .dirty = {
      .mesa = _NEW_COLOR,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_blend_constant_color
};

/* Constant single cliprect for framebuffer object or DRI2 drawing */
static void upload_drawing_rect(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   GLcontext *ctx = &intel->ctx;

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_DRAWRECT_INFO_I965);
   OUT_BATCH(0); /* xmin, ymin */
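   /* xmax and ymax are inclusive, hence the minus one on each. */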
   OUT_BATCH(((ctx->DrawBuffer->Width - 1) & 0xffff) |
             ((ctx->DrawBuffer->Height - 1) << 16));
   OUT_BATCH(0);
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_drawing_rect = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_drawing_rect
};

static void prepare_binding_table_pointers(struct brw_context *brw)
{
   brw_add_validated_bo(brw, brw->vs.bind_bo);
   brw_add_validated_bo(brw, brw->wm.bind_bo);
}

/**
 * Upload the binding table pointers, which point to each stage's array of
 * surface state pointers.
 *
 * The binding table pointers are relative to the surface state base address,
 * which is 0.
 */
static void upload_binding_table_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(6);
   OUT_BATCH(CMD_BINDING_TABLE_PTRS << 16 | (6 - 2));
   if (brw->vs.bind_bo != NULL)
      OUT_RELOC(brw->vs.bind_bo, I915_GEM_DOMAIN_SAMPLER, 0, 0); /* vs */
   else
      OUT_BATCH(0);
   OUT_BATCH(0); /* gs */
   OUT_BATCH(0); /* clip */
   OUT_BATCH(0); /* sf */
   OUT_RELOC(brw->wm.bind_bo, I915_GEM_DOMAIN_SAMPLER, 0, 0); /* wm/ps */
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_binding_table_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH,
      .cache = CACHE_NEW_SURF_BIND,
   },
   .prepare = prepare_binding_table_pointers,
   .emit = upload_binding_table_pointers,
};

/**
 * Upload the binding table pointers, which point to each stage's array of
 * surface state pointers.
 *
 * The binding table pointers are relative to the surface state base address,
 * which is 0.
 */
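/* Sandybridge's version of the packet carries per-stage modify-enable bits,
 * so only the VS, GS and PS entries flagged below are updated.
 */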
static void upload_gen6_binding_table_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(4);
   OUT_BATCH(CMD_BINDING_TABLE_PTRS << 16 |
             GEN6_BINDING_TABLE_MODIFY_VS |
             GEN6_BINDING_TABLE_MODIFY_GS |
             GEN6_BINDING_TABLE_MODIFY_PS |
             (4 - 2));
   if (brw->vs.bind_bo != NULL)
      OUT_RELOC(brw->vs.bind_bo, I915_GEM_DOMAIN_SAMPLER, 0, 0); /* vs */
   else
      OUT_BATCH(0);
   OUT_BATCH(0); /* gs */
   OUT_RELOC(brw->wm.bind_bo, I915_GEM_DOMAIN_SAMPLER, 0, 0); /* wm/ps */
   ADVANCE_BATCH();
}

const struct brw_tracked_state gen6_binding_table_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH,
      .cache = CACHE_NEW_SURF_BIND,
   },
   .prepare = prepare_binding_table_pointers,
   .emit = upload_gen6_binding_table_pointers,
};

/**
 * Upload pointers to the per-stage state.
 *
 * The state pointers in this packet are all relative to the general state
 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
 */
static void upload_pipelined_state_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(7);
   OUT_BATCH(CMD_PIPELINED_STATE_POINTERS << 16 | (7 - 2));
   OUT_RELOC(brw->vs.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   if (brw->gs.prog_active)
      OUT_RELOC(brw->gs.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
   else
      OUT_BATCH(0);
   OUT_RELOC(brw->clip.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
   OUT_RELOC(brw->sf.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   OUT_RELOC(brw->wm.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   OUT_RELOC(brw->cc.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   ADVANCE_BATCH();

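   /* Let atoms that depend on BRW_NEW_PSP know that new pipelined state
    * pointers have been emitted.
    */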
   brw->state.dirty.brw |= BRW_NEW_PSP;
}


static void prepare_psp_urb_cbs(struct brw_context *brw)
{
   brw_add_validated_bo(brw, brw->vs.state_bo);
   brw_add_validated_bo(brw, brw->gs.state_bo);
   brw_add_validated_bo(brw, brw->clip.state_bo);
   brw_add_validated_bo(brw, brw->sf.state_bo);
   brw_add_validated_bo(brw, brw->wm.state_bo);
   brw_add_validated_bo(brw, brw->cc.state_bo);
}

static void upload_psp_urb_cbs(struct brw_context *brw)
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_cs_urb_state(brw);
}

const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_URB_FENCE | BRW_NEW_BATCH,
      .cache = (CACHE_NEW_VS_UNIT |
                CACHE_NEW_GS_UNIT |
                CACHE_NEW_GS_PROG |
                CACHE_NEW_CLIP_UNIT |
                CACHE_NEW_SF_UNIT |
                CACHE_NEW_WM_UNIT |
                CACHE_NEW_CC_UNIT)
   },
   .prepare = prepare_psp_urb_cbs,
   .emit = upload_psp_urb_cbs,
};

static void prepare_depthbuffer(struct brw_context *brw)
{
   struct intel_region *region = brw->state.depth_region;

   if (region != NULL)
      brw_add_validated_bo(brw, region->buffer);
}

static void emit_depthbuffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct intel_region *region = brw->state.depth_region;
   unsigned int len;

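   /* The depth buffer packet gained a dword on G4X/Ironlake and another on
    * Sandybridge.
    */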
   if (intel->gen >= 6)
      len = 7;
   else if (intel->is_g4x || intel->is_ironlake)
      len = 6;
   else
      len = 5;

   if (region == NULL) {
      BEGIN_BATCH(len);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
                (BRW_SURFACE_NULL << 29));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);

      if (intel->is_g4x || intel->is_ironlake || intel->gen >= 6)
         OUT_BATCH(0);

      if (intel->gen >= 6)
         OUT_BATCH(0);

      ADVANCE_BATCH();
   } else {
      unsigned int format;

      switch (region->cpp) {
      case 2:
         format = BRW_DEPTHFORMAT_D16_UNORM;
         break;
      case 4:
         if (intel->depth_buffer_is_float)
            format = BRW_DEPTHFORMAT_D32_FLOAT;
         else
            format = BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
         break;
      default:
         assert(0);
         return;
      }

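      /* Depth buffers can't be X-tiled, and on Sandybridge they must be
       * tiled at all (Y-major, per the tile walk below).
       */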
      assert(region->tiling != I915_TILING_X);
      if (IS_GEN6(intel->intelScreen->deviceID))
         assert(region->tiling != I915_TILING_NONE);

      BEGIN_BATCH(len);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH(((region->pitch * region->cpp) - 1) |
                (format << 18) |
                (BRW_TILEWALK_YMAJOR << 26) |
                ((region->tiling != I915_TILING_NONE) << 27) |
                (BRW_SURFACE_2D << 29));
      OUT_RELOC(region->buffer,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                0);
      OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1) |
                ((region->pitch - 1) << 6) |
                ((region->height - 1) << 19));
      OUT_BATCH(0);

      if (intel->is_g4x || intel->is_ironlake || intel->gen >= 6)
         OUT_BATCH(0);

      if (intel->gen >= 6)
         OUT_BATCH(0);

      ADVANCE_BATCH();
   }
}

const struct brw_tracked_state brw_depthbuffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_DEPTH_BUFFER | BRW_NEW_BATCH,
      .cache = 0,
   },
   .prepare = prepare_depthbuffer,
   .emit = emit_depthbuffer,
};


/***********************************************************************
 * Polygon stipple packet
 */

static void upload_polygon_stipple(struct brw_context *brw)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct brw_polygon_stipple bps;
   GLuint i;

   memset(&bps, 0, sizeof(bps));
   bps.header.opcode = CMD_POLY_STIPPLE_PATTERN;
   bps.header.length = sizeof(bps)/4-2;

   /* Polygon stipple is provided in OpenGL order, i.e. bottom
    * row first.  If we're rendering to a window (i.e. the
    * default frame buffer object, 0), then we need to invert
    * it to match our pixel layout.  But if we're rendering
    * to a FBO (i.e. any named frame buffer object), we *don't*
    * need to invert - we already match the layout.
    */
   if (ctx->DrawBuffer->Name == 0) {
      for (i = 0; i < 32; i++)
         bps.stipple[i] = ctx->PolygonStipple[31 - i]; /* invert */
   }
   else {
      for (i = 0; i < 32; i++)
         bps.stipple[i] = ctx->PolygonStipple[i]; /* don't invert */
   }

   BRW_CACHED_BATCH_STRUCT(brw, &bps);
}

const struct brw_tracked_state brw_polygon_stipple = {
   .dirty = {
      .mesa = _NEW_POLYGONSTIPPLE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_polygon_stipple
};


/***********************************************************************
 * Polygon stipple offset packet
 */

static void upload_polygon_stipple_offset(struct brw_context *brw)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct brw_polygon_stipple_offset bpso;

   memset(&bpso, 0, sizeof(bpso));
   bpso.header.opcode = CMD_POLY_STIPPLE_OFFSET;
   bpso.header.length = sizeof(bpso)/4-2;

   /* If we're drawing to a system window (ctx->DrawBuffer->Name == 0),
    * we have to invert the Y axis in order to match the OpenGL
    * pixel coordinate system, and our offset must be matched
    * to the window position.  If we're drawing to a FBO
    * (ctx->DrawBuffer->Name != 0), then our native pixel coordinate
    * system works just fine, and there's no window system to
    * worry about.
    */
   if (brw->intel.ctx.DrawBuffer->Name == 0) {
      bpso.bits0.x_offset = 0;
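      /* e.g. a window 100 pixels tall: 100 & 31 = 4, so y_offset = 28,
       * which realigns the 32x32 pattern with the window origin.
       */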
      bpso.bits0.y_offset = (32 - (ctx->DrawBuffer->Height & 31)) & 31;
   }
   else {
      bpso.bits0.y_offset = 0;
      bpso.bits0.x_offset = 0;
   }

   BRW_CACHED_BATCH_STRUCT(brw, &bpso);
}

#define _NEW_WINDOW_POS 0x40000000

const struct brw_tracked_state brw_polygon_stipple_offset = {
   .dirty = {
      .mesa = _NEW_WINDOW_POS,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_polygon_stipple_offset
};

/**********************************************************************
 * AA Line parameters
 */
static void upload_aa_line_parameters(struct brw_context *brw)
{
   struct brw_aa_line_parameters balp;

   if (!brw->has_aa_line_parameters)
      return;

   /* use legacy aa line coverage computation */
   memset(&balp, 0, sizeof(balp));
   balp.header.opcode = CMD_AA_LINE_PARAMETERS;
   balp.header.length = sizeof(balp) / 4 - 2;

   BRW_CACHED_BATCH_STRUCT(brw, &balp);
}

const struct brw_tracked_state brw_aa_line_parameters = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_aa_line_parameters
};

/***********************************************************************
 * Line stipple packet
 */

static void upload_line_stipple(struct brw_context *brw)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct brw_line_stipple bls;
   GLfloat tmp;
   GLint tmpi;

   memset(&bls, 0, sizeof(bls));
   bls.header.opcode = CMD_LINE_STIPPLE_PATTERN;
   bls.header.length = sizeof(bls)/4 - 2;

   bls.bits0.pattern = ctx->Line.StipplePattern;
   bls.bits1.repeat_count = ctx->Line.StippleFactor;

   tmp = 1.0 / (GLfloat) ctx->Line.StippleFactor;
   tmpi = tmp * (1<<13);
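   /* The reciprocal goes to the hardware in fixed point with 13 fractional
    * bits: e.g. a stipple factor of 3 gives 8192/3 ~= 2730.
    */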

   bls.bits1.inverse_repeat_count = tmpi;

   BRW_CACHED_BATCH_STRUCT(brw, &bls);
}

const struct brw_tracked_state brw_line_stipple = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_line_stipple
};


/***********************************************************************
 * Misc invariant state packets
 */

static void upload_invarient_state(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   {
      /* 0x61040000  Pipeline Select */
      /*     PipelineSelect            : 0 */
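      /* 0 selects the 3D pipeline; 1 would select the media pipeline. */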
      struct brw_pipeline_select ps;

      memset(&ps, 0, sizeof(ps));
      ps.header.opcode = brw->CMD_PIPELINE_SELECT;
      ps.header.pipeline_select = 0;
      BRW_BATCH_STRUCT(brw, &ps);
   }

   if (intel->gen < 6) {
      struct brw_global_depth_offset_clamp gdo;
      memset(&gdo, 0, sizeof(gdo));

      /* Disable depth offset clamping.
       */
      gdo.header.opcode = CMD_GLOBAL_DEPTH_OFFSET_CLAMP;
      gdo.header.length = sizeof(gdo)/4 - 2;
      gdo.depth_offset_clamp = 0.0;

      BRW_BATCH_STRUCT(brw, &gdo);
   }


   /* 0x61020000  State Instruction Pointer */
   {
      struct brw_system_instruction_pointer sip;
      memset(&sip, 0, sizeof(sip));

      sip.header.opcode = CMD_STATE_INSN_POINTER;
      sip.header.length = 0;
      sip.bits0.pad = 0;
      sip.bits0.system_instruction_pointer = 0;
      BRW_BATCH_STRUCT(brw, &sip);
   }


   {
      struct brw_vf_statistics vfs;
      memset(&vfs, 0, sizeof(vfs));

      vfs.opcode = brw->CMD_VF_STATISTICS;
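      /* Collect vertex fetch statistics only when statistics debugging is
       * requested.
       */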
      if (INTEL_DEBUG & DEBUG_STATS)
         vfs.statistics_enable = 1;

      BRW_BATCH_STRUCT(brw, &vfs);
   }
}

const struct brw_tracked_state brw_invarient_state = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_invarient_state
};

/**
 * Define the base addresses which some state is referenced from.
 *
 * This lets us avoid emitting relocations in many places for cached state,
 * and instead emit pointers into large, mostly-static state pools.  The
 * trade-off is memory use and more expensive cache misses.
 */
static void upload_state_base_address(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   /* Output the structure (brw_state_base_address) directly to the
    * batchbuffer, so we can emit relocations inline.
    */
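   /* Each dword of 1 below programs a base address (or upper bound) of zero
    * with the modify-enable bit, bit 0, set.
    */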
   if (intel->gen >= 6) {
      BEGIN_BATCH(10);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_BATCH(1); /* Surface state base address */
      OUT_BATCH(1); /* Dynamic state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* Instruction base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Dynamic state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else if (intel->is_ironlake) {
      BEGIN_BATCH(8);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_BATCH(1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* Instruction base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(6);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_BATCH(1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      ADVANCE_BATCH();
   }
}

const struct brw_tracked_state brw_state_base_address = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0,
   },
   .emit = upload_state_base_address
};