/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include "intel_batchbuffer.h"
#include "intel_regions.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"

/***********************************************************************
 * Blend color
 */

static void upload_blend_constant_color(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct brw_blend_constant_color bcc;

   memset(&bcc, 0, sizeof(bcc));
   bcc.header.opcode = CMD_BLEND_CONSTANT_COLOR;
   bcc.header.length = sizeof(bcc) / 4 - 2;
   bcc.blend_constant_color[0] = ctx->Color.BlendColor[0];
   bcc.blend_constant_color[1] = ctx->Color.BlendColor[1];
   bcc.blend_constant_color[2] = ctx->Color.BlendColor[2];
   bcc.blend_constant_color[3] = ctx->Color.BlendColor[3];

   BRW_CACHED_BATCH_STRUCT(brw, &bcc);
}


const struct brw_tracked_state brw_blend_constant_color = {
   .dirty = {
      .mesa = _NEW_COLOR,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_blend_constant_color
};
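
/* A note on the tracked-state pattern used throughout this file: each
 * brw_tracked_state atom pairs a set of dirty flags with an emit()
 * (and optionally prepare()) callback.  A minimal sketch of how an
 * atom is consumed, assuming the atom-walking loop in
 * brw_state_upload.c:
 *
 *    if ((state->mesa & atom->dirty.mesa) ||
 *        (state->brw & atom->dirty.brw) ||
 *        (state->cache & atom->dirty.cache))
 *       atom->emit(brw);
 *
 * So brw_blend_constant_color above is re-emitted whenever GL color
 * state (_NEW_COLOR) changes or a new context is created.
 */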

/* Constant single cliprect for framebuffer object or DRI2 drawing */
static void upload_drawing_rect(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct gl_context *ctx = &intel->ctx;

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_DRAWRECT_INFO_I965);
   OUT_BATCH(0); /* xmin, ymin */
   OUT_BATCH(((ctx->DrawBuffer->Width - 1) & 0xffff) |
             ((ctx->DrawBuffer->Height - 1) << 16));
   OUT_BATCH(0);
   ADVANCE_BATCH();
}
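
/* Worked example of the packing above (illustrative numbers, not from
 * the original source): for a 1920x1080 drawbuffer, the third DWORD is
 * ((1080 - 1) << 16) | ((1920 - 1) & 0xffff) = 0x0437077f, i.e. the
 * inclusive maximum x/y of the drawing rectangle; DWORD 1 fixes the
 * minimum corner at (0, 0).
 */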

const struct brw_tracked_state brw_drawing_rect = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_drawing_rect
};

/**
 * Upload the binding table pointers, which point to each stage's array
 * of surface state pointers.
 *
 * The binding table pointers are relative to the surface state base
 * address, which points at the batchbuffer containing the streamed
 * batch state.
 */
static void upload_binding_table_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(6);
   OUT_BATCH(CMD_BINDING_TABLE_PTRS << 16 | (6 - 2));
   OUT_BATCH(brw->vs.bind_bo_offset);
   OUT_BATCH(0); /* gs */
   OUT_BATCH(0); /* clip */
   OUT_BATCH(0); /* sf */
   OUT_BATCH(brw->wm.bind_bo_offset);
   ADVANCE_BATCH();
}
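
/* The "(6 - 2)" above is the hardware's DWORD-count convention: the
 * length field of a 3D command holds the total packet size in DWORDs
 * minus 2.  The same convention appears as "sizeof(struct)/4 - 2" in
 * the struct-based emitters in this file; e.g. a 4-DWORD packet
 * carries a length field of 2.
 */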

const struct brw_tracked_state brw_binding_table_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_BINDING_TABLE,
      .cache = 0,
   },
   .emit = upload_binding_table_pointers,
};

/**
 * Gen6 version of the binding table pointer upload.  As above, the
 * pointers are relative to the surface state base address, which
 * points at the batchbuffer containing the streamed batch state; the
 * gen6 packet additionally carries per-stage modify-enable bits.
 */
static void upload_gen6_binding_table_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(4);
   OUT_BATCH(CMD_BINDING_TABLE_PTRS << 16 |
             GEN6_BINDING_TABLE_MODIFY_VS |
             GEN6_BINDING_TABLE_MODIFY_GS |
             GEN6_BINDING_TABLE_MODIFY_PS |
             (4 - 2));
   OUT_BATCH(brw->vs.bind_bo_offset); /* vs */
   OUT_BATCH(0); /* gs */
   OUT_BATCH(brw->wm.bind_bo_offset); /* wm/ps */
   ADVANCE_BATCH();
}

const struct brw_tracked_state gen6_binding_table_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_BINDING_TABLE,
      .cache = 0,
   },
   .emit = upload_gen6_binding_table_pointers,
};
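
/* Unlike the gen4 packet above, the gen6 variant carries per-stage
 * modify-enable bits, so (on my reading of the packet) a single
 * stage's binding table can be updated without disturbing the others.
 * A hypothetical sketch, not used by this driver (which always
 * rewrites all three tables), of touching only the wm/ps table:
 *
 *    OUT_BATCH(CMD_BINDING_TABLE_PTRS << 16 |
 *              GEN6_BINDING_TABLE_MODIFY_PS |
 *              (4 - 2));
 *    OUT_BATCH(0);                      -- vs DWORD, ignored
 *    OUT_BATCH(0);                      -- gs DWORD, ignored
 *    OUT_BATCH(brw->wm.bind_bo_offset); -- wm/ps
 */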

/**
 * Upload pointers to the per-stage state.
 *
 * The state pointers in this packet are all relative to the general
 * state base address set by CMD_STATE_BASE_ADDRESS, which is 0.
 */
static void upload_pipelined_state_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   if (intel->gen == 5) {
      /* Workaround for an Ironlake erratum: the clip unit's maximum
       * thread count must not be changed without a flush in between.
       */
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }

   BEGIN_BATCH(7);
   OUT_BATCH(CMD_PIPELINED_STATE_POINTERS << 16 | (7 - 2));
   OUT_RELOC(brw->vs.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   if (brw->gs.prog_active)
      OUT_RELOC(brw->gs.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
   else
      OUT_BATCH(0);
   OUT_RELOC(brw->clip.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
   OUT_RELOC(brw->sf.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   OUT_RELOC(brw->wm.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   OUT_RELOC(brw->cc.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->cc.state_offset);
   ADVANCE_BATCH();

   brw->state.dirty.brw |= BRW_NEW_PSP;
}
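
/* A note on the relocations above, to the best of my reading of the
 * gen4 docs: the last OUT_RELOC argument is a delta added to the
 * buffer's GPU address, and for the GS and clip pointers bit 0 of the
 * resulting DWORD doubles as the unit's enable bit.  That is why the
 * clip pointer always ORs in 1, the GS pointer does so only when a GS
 * program is active, and the CC pointer instead passes
 * brw->cc.state_offset to select the right state within its buffer.
 */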


static void prepare_psp_urb_cbs(struct brw_context *brw)
{
   brw_add_validated_bo(brw, brw->vs.state_bo);
   brw_add_validated_bo(brw, brw->gs.state_bo);
   brw_add_validated_bo(brw, brw->clip.state_bo);
   brw_add_validated_bo(brw, brw->sf.state_bo);
   brw_add_validated_bo(brw, brw->wm.state_bo);
}

static void upload_psp_urb_cbs(struct brw_context *brw)
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_cs_urb_state(brw);
}

const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_URB_FENCE | BRW_NEW_BATCH,
      .cache = (CACHE_NEW_VS_UNIT |
                CACHE_NEW_GS_UNIT |
                CACHE_NEW_GS_PROG |
                CACHE_NEW_CLIP_UNIT |
                CACHE_NEW_SF_UNIT |
                CACHE_NEW_WM_UNIT |
                CACHE_NEW_CC_UNIT)
   },
   .prepare = prepare_psp_urb_cbs,
   .emit = upload_psp_urb_cbs,
};
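
/* prepare vs. emit, as this atom uses both: prepare callbacks run
 * before the batch is assembled and only accumulate buffers via
 * brw_add_validated_bo(), so the driver can check up front that
 * everything will fit in the GPU aperture; emit callbacks then write
 * the actual commands.  A rough sketch of the contract, assuming the
 * atom-walking code in brw_state_upload.c:
 *
 *    atom->prepare(brw);   -- build the list of BOs to validate
 *    ... aperture check / batch flush if needed ...
 *    atom->emit(brw);      -- stream commands into the batch
 */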

static void prepare_depthbuffer(struct brw_context *brw)
{
   struct intel_region *region = brw->state.depth_region;

   if (region != NULL)
      brw_add_validated_bo(brw, region->buffer);
}

static void emit_depthbuffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct intel_region *region = brw->state.depth_region;
   unsigned int len;

   if (intel->gen >= 6)
      len = 7;
   else if (intel->is_g4x || intel->gen == 5)
      len = 6;
   else
      len = 5;

   if (region == NULL) {
      BEGIN_BATCH(len);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
                (BRW_SURFACE_NULL << 29));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);

      if (intel->is_g4x || intel->gen >= 5)
         OUT_BATCH(0);

      if (intel->gen >= 6)
         OUT_BATCH(0);

      ADVANCE_BATCH();
   } else {
      unsigned int format;

      switch (region->cpp) {
      case 2:
         format = BRW_DEPTHFORMAT_D16_UNORM;
         break;
      case 4:
         if (intel->depth_buffer_is_float)
            format = BRW_DEPTHFORMAT_D32_FLOAT;
         else
            format = BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
         break;
      default:
         assert(0);
         return;
      }

      assert(region->tiling != I915_TILING_X);
      if (intel->gen >= 6)
         assert(region->tiling != I915_TILING_NONE);

      BEGIN_BATCH(len);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH(((region->pitch * region->cpp) - 1) |
                (format << 18) |
                (BRW_TILEWALK_YMAJOR << 26) |
                ((region->tiling != I915_TILING_NONE) << 27) |
                (BRW_SURFACE_2D << 29));
      OUT_RELOC(region->buffer,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                0);
      OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1) |
                ((region->width - 1) << 6) |
                ((region->height - 1) << 19));
      OUT_BATCH(0);

      if (intel->is_g4x || intel->gen >= 5)
         OUT_BATCH(0);

      if (intel->gen >= 6)
         OUT_BATCH(0);

      ADVANCE_BATCH();
   }

   /* Initialize the depth clear value for safety. */
   if (intel->gen >= 6) {
      BEGIN_BATCH(2);
      OUT_BATCH(CMD_3D_CLEAR_PARAMS << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}
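
/* Worked example of the second DWORD packing above (illustrative
 * numbers): a 1024x768 depth/stencil region with cpp = 4 and a pitch
 * of 1024 pixels gives a pitch field of 1024 * 4 - 1 = 4095 (the
 * field holds bytes minus one, while region->pitch is in pixels),
 * BRW_DEPTHFORMAT_D24_UNORM_S8_UINT at bit 18, a Y-major tile walk at
 * bit 26, tiled = 1 at bit 27, and BRW_SURFACE_2D at bit 29.
 */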

const struct brw_tracked_state brw_depthbuffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_DEPTH_BUFFER | BRW_NEW_BATCH,
      .cache = 0,
   },
   .prepare = prepare_depthbuffer,
   .emit = emit_depthbuffer,
};


/***********************************************************************
 * Polygon stipple packet
 */

static void upload_polygon_stipple(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct brw_polygon_stipple bps;
   GLuint i;

   memset(&bps, 0, sizeof(bps));
   bps.header.opcode = CMD_POLY_STIPPLE_PATTERN;
   bps.header.length = sizeof(bps) / 4 - 2;

   /* Polygon stipple is provided in OpenGL order, i.e. bottom
    * row first.  If we're rendering to a window (i.e. the
    * default frame buffer object, 0), then we need to invert
    * it to match our pixel layout.  But if we're rendering
    * to a FBO (i.e. any named frame buffer object), we *don't*
    * need to invert - we already match the layout.
    */
   if (ctx->DrawBuffer->Name == 0) {
      for (i = 0; i < 32; i++)
         bps.stipple[i] = ctx->PolygonStipple[31 - i]; /* invert */
   }
   else {
      for (i = 0; i < 32; i++)
         bps.stipple[i] = ctx->PolygonStipple[i]; /* don't invert */
   }

   BRW_CACHED_BATCH_STRUCT(brw, &bps);
}

const struct brw_tracked_state brw_polygon_stipple = {
   .dirty = {
      .mesa = _NEW_POLYGONSTIPPLE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_polygon_stipple
};


/***********************************************************************
 * Polygon stipple offset packet
 */

static void upload_polygon_stipple_offset(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct brw_polygon_stipple_offset bpso;

   memset(&bpso, 0, sizeof(bpso));
   bpso.header.opcode = CMD_POLY_STIPPLE_OFFSET;
   bpso.header.length = sizeof(bpso) / 4 - 2;

   /* If we're drawing to a system window (ctx->DrawBuffer->Name == 0),
    * we have to invert the Y axis in order to match the OpenGL
    * pixel coordinate system, and our offset must be matched
    * to the window position.  If we're drawing to a FBO
    * (ctx->DrawBuffer->Name != 0), then our native pixel coordinate
    * system works just fine, and there's no window system to
    * worry about.
    */
   if (brw->intel.ctx.DrawBuffer->Name == 0) {
      bpso.bits0.x_offset = 0;
      bpso.bits0.y_offset = (32 - (ctx->DrawBuffer->Height & 31)) & 31;
   }
   else {
      bpso.bits0.y_offset = 0;
      bpso.bits0.x_offset = 0;
   }

   BRW_CACHED_BATCH_STRUCT(brw, &bpso);
}
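
/* Worked example of the y_offset expression above (illustrative): for
 * a window of height 100, Height & 31 = 4, so y_offset = (32 - 4) & 31
 * = 28, realigning the 32x32 stipple pattern with GL's bottom-left
 * origin after the Y flip.  A height that is a multiple of 32 gives
 * (32 - 0) & 31 = 0, i.e. no offset is needed.
 */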

#define _NEW_WINDOW_POS 0x40000000

const struct brw_tracked_state brw_polygon_stipple_offset = {
   .dirty = {
      .mesa = _NEW_WINDOW_POS,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_polygon_stipple_offset
};

/**********************************************************************
 * AA Line parameters
 */
static void upload_aa_line_parameters(struct brw_context *brw)
{
   struct brw_aa_line_parameters balp;

   if (!brw->has_aa_line_parameters)
      return;

   /* use legacy aa line coverage computation */
   memset(&balp, 0, sizeof(balp));
   balp.header.opcode = CMD_AA_LINE_PARAMETERS;
   balp.header.length = sizeof(balp) / 4 - 2;

   BRW_CACHED_BATCH_STRUCT(brw, &balp);
}

const struct brw_tracked_state brw_aa_line_parameters = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_aa_line_parameters
};

/***********************************************************************
 * Line stipple packet
 */

static void upload_line_stipple(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct brw_line_stipple bls;
   GLfloat tmp;
   GLint tmpi;

   memset(&bls, 0, sizeof(bls));
   bls.header.opcode = CMD_LINE_STIPPLE_PATTERN;
   bls.header.length = sizeof(bls) / 4 - 2;

   bls.bits0.pattern = ctx->Line.StipplePattern;
   bls.bits1.repeat_count = ctx->Line.StippleFactor;

   tmp = 1.0 / (GLfloat) ctx->Line.StippleFactor;
   tmpi = tmp * (1 << 13);

   bls.bits1.inverse_repeat_count = tmpi;

   BRW_CACHED_BATCH_STRUCT(brw, &bls);
}
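
/* The inverse repeat count above appears to be a 1.13 fixed-point
 * value, hence the multiply by 1 << 13.  Worked example: a stipple
 * factor of 3 gives tmpi = (1.0 / 3) * 8192 = 2730 (0x0aaa), which
 * lets the hardware step through the 16-bit pattern without a
 * per-pixel divide.
 */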

const struct brw_tracked_state brw_line_stipple = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_line_stipple
};


/***********************************************************************
 * Misc invariant state packets
 */

static void upload_invarient_state(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   {
      /* 0x61040000  Pipeline Select */
      /* PipelineSelect: 0 selects the 3D pipeline (1 would be media) */
      struct brw_pipeline_select ps;

      memset(&ps, 0, sizeof(ps));
      ps.header.opcode = brw->CMD_PIPELINE_SELECT;
      ps.header.pipeline_select = 0;
      BRW_BATCH_STRUCT(brw, &ps);
   }

   if (intel->gen < 6) {
      struct brw_global_depth_offset_clamp gdo;
      memset(&gdo, 0, sizeof(gdo));

      /* Disable depth offset clamping. */
      gdo.header.opcode = CMD_GLOBAL_DEPTH_OFFSET_CLAMP;
      gdo.header.length = sizeof(gdo) / 4 - 2;
      gdo.depth_offset_clamp = 0.0;

      BRW_BATCH_STRUCT(brw, &gdo);
   }

   if (intel->gen >= 6) {
      int i;

      BEGIN_BATCH(3);
      OUT_BATCH(CMD_3D_MULTISAMPLE << 16 | (3 - 2));
      OUT_BATCH(MS_PIXEL_LOCATION_CENTER |
                MS_NUMSAMPLES_1);
      OUT_BATCH(0); /* positions for 4/8-sample */
      ADVANCE_BATCH();

      BEGIN_BATCH(2);
      OUT_BATCH(CMD_3D_SAMPLE_MASK << 16 | (2 - 2));
      OUT_BATCH(1);
      ADVANCE_BATCH();

      for (i = 0; i < 4; i++) {
         BEGIN_BATCH(4);
         OUT_BATCH(CMD_GS_SVB_INDEX << 16 | (4 - 2));
         OUT_BATCH(i << SVB_INDEX_SHIFT);
         OUT_BATCH(0);
         OUT_BATCH(0xffffffff);
         ADVANCE_BATCH();
      }
   }

   /* 0x61020000  State Instruction Pointer */
   {
      struct brw_system_instruction_pointer sip;
      memset(&sip, 0, sizeof(sip));

      sip.header.opcode = CMD_STATE_INSN_POINTER;
      sip.header.length = 0;
      sip.bits0.pad = 0;
      sip.bits0.system_instruction_pointer = 0;
      BRW_BATCH_STRUCT(brw, &sip);
   }

   {
      struct brw_vf_statistics vfs;
      memset(&vfs, 0, sizeof(vfs));

      vfs.opcode = brw->CMD_VF_STATISTICS;
      if (INTEL_DEBUG & DEBUG_STATS)
         vfs.statistics_enable = 1;

      BRW_BATCH_STRUCT(brw, &vfs);
   }
}
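
/* A brief map of the invariant state emitted above, as I read it:
 * PIPELINE_SELECT picks the 3D pipeline; the gen6 multisample packets
 * program a single-sample configuration with sample 0 enabled in the
 * mask; the GS_SVB_INDEX loop resets the four streamed-vertex-buffer
 * indices to index 0 with a maximum of 0xffffffff; SIP points the
 * system routine at instruction offset 0; and VF statistics gathering
 * is enabled only when DEBUG_STATS is set.
 */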

const struct brw_tracked_state brw_invarient_state = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_invarient_state
};

/**
 * Define the base addresses which some state is referenced from.
 *
 * This allows us to avoid having to emit relocations for the objects,
 * and is actually required for binding table pointers on gen6.
 *
 * Surface state base address covers binding table pointers and
 * surface state objects, but not the surfaces that the surface state
 * objects point to.
 */
static void upload_state_base_address(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   if (intel->gen >= 6) {
      BEGIN_BATCH(10);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(intel->batch->buf, I915_GEM_DOMAIN_SAMPLER, 0,
                1); /* Surface state base address */
      OUT_BATCH(1); /* Dynamic state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* Instruction base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Dynamic state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else if (intel->gen == 5) {
      BEGIN_BATCH(8);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(intel->batch->buf, I915_GEM_DOMAIN_SAMPLER, 0,
                1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* Instruction base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(6);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(intel->batch->buf, I915_GEM_DOMAIN_SAMPLER, 0,
                1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      ADVANCE_BATCH();
   }
}
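
/* The bare OUT_BATCH(1) DWORDs above are not addresses: as I
 * understand the packet, bit 0 of each STATE_BASE_ADDRESS DWORD is a
 * "modify enable" bit, so 1 means "update this base (or bound) to
 * zero".  Only the surface state base points at the batchbuffer
 * itself, which is what lets binding table pointers and surface state
 * be streamed into the batch and referenced by small offsets rather
 * than relocations.
 */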

const struct brw_tracked_state brw_state_base_address = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH,
      .cache = 0,
   },
   .emit = upload_state_base_address
};