i965/gen4: Move VS state to state streaming.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_misc_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33
34 #include "intel_batchbuffer.h"
35 #include "intel_regions.h"
36
37 #include "brw_context.h"
38 #include "brw_state.h"
39 #include "brw_defines.h"
40
/* Constant single cliprect for framebuffer object or DRI2 drawing */
static void upload_drawing_rect(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;  /* referenced by the batch macros */
   struct gl_context *ctx = &intel->ctx;

   /* Emit a single drawing rectangle covering the entire drawbuffer:
    * xmin/ymin = 0, xmax/ymax = buffer dimensions minus one (inclusive).
    */
   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_DRAWRECT_INFO_I965);
   OUT_BATCH(0); /* xmin, ymin */
   OUT_BATCH(((ctx->DrawBuffer->Width - 1) & 0xffff) |
             ((ctx->DrawBuffer->Height - 1) << 16));
   OUT_BATCH(0); /* drawing rectangle origin (0, 0) */
   ADVANCE_BATCH();
}
55
/* Re-emitted when the bound draw buffer changes or a new context starts. */
const struct brw_tracked_state brw_drawing_rect = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_drawing_rect
};
64
/**
 * Upload the binding table pointers, which point each stage's array of surface
 * state pointers.
 *
 * The binding table pointers are relative to the surface state base address,
 * which points at the batchbuffer containing the streamed batch state.
 *
 * Only the VS and WM stages have binding tables here; GS, clip, and SF get
 * zero (no binding table).
 */
static void upload_binding_table_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;  /* referenced by the batch macros */

   BEGIN_BATCH(6);
   OUT_BATCH(_3DSTATE_BINDING_TABLE_POINTERS << 16 | (6 - 2));
   OUT_BATCH(brw->vs.bind_bo_offset);
   OUT_BATCH(0); /* gs */
   OUT_BATCH(0); /* clip */
   OUT_BATCH(0); /* sf */
   OUT_BATCH(brw->wm.bind_bo_offset);
   ADVANCE_BATCH();
}
85
/* Re-emitted on a new batch (base addresses change) or when the binding
 * tables themselves are re-streamed.
 */
const struct brw_tracked_state brw_binding_table_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_BINDING_TABLE,
      .cache = 0,
   },
   .emit = upload_binding_table_pointers,
};
94
/**
 * Upload the binding table pointers, which point each stage's array of surface
 * state pointers.
 *
 * The binding table pointers are relative to the surface state base address,
 * which points at the batchbuffer containing the streamed batch state.
 *
 * Gen6 variant: the packet shrinks to VS/GS/PS slots, each guarded by an
 * explicit MODIFY bit in the header DWord.
 */
static void upload_gen6_binding_table_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;  /* referenced by the batch macros */

   BEGIN_BATCH(4);
   OUT_BATCH(_3DSTATE_BINDING_TABLE_POINTERS << 16 |
             GEN6_BINDING_TABLE_MODIFY_VS |
             GEN6_BINDING_TABLE_MODIFY_GS |
             GEN6_BINDING_TABLE_MODIFY_PS |
             (4 - 2));
   OUT_BATCH(brw->vs.bind_bo_offset); /* vs */
   OUT_BATCH(0); /* gs */
   OUT_BATCH(brw->wm.bind_bo_offset); /* wm/ps */
   ADVANCE_BATCH();
}
117
/* Gen6 counterpart of brw_binding_table_pointers; same dirty triggers. */
const struct brw_tracked_state gen6_binding_table_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH | BRW_NEW_BINDING_TABLE,
      .cache = 0,
   },
   .emit = upload_gen6_binding_table_pointers,
};
126
/**
 * Upload pointers to the per-stage state.
 *
 * The state pointers in this packet are all relative to the general state
 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
 *
 * VS, SF, WM, and CC unit state is streamed into the batchbuffer
 * (offsets relative to batch.bo); GS and clip still live in their own
 * cached state BOs.
 */
static void upload_pipelined_state_pointers(struct brw_context *brw )
{
   struct intel_context *intel = &brw->intel;

   if (intel->gen == 5) {
      /* Need to flush before changing clip max threads for errata. */
      BEGIN_BATCH(1);
      OUT_BATCH(MI_FLUSH);
      ADVANCE_BATCH();
   }

   BEGIN_BATCH(7);
   OUT_BATCH(_3DSTATE_PIPELINED_POINTERS << 16 | (7 - 2));
   /* VS unit state streamed into the batchbuffer. */
   OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->vs.state_offset);
   if (brw->gs.prog_active)
      /* NOTE(review): the +1 delta presumably sets the "enable" bit of the
       * GS pointer DWord — confirm against the PIPELINED_POINTERS layout.
       */
      OUT_RELOC(brw->gs.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
   else
      OUT_BATCH(0); /* GS disabled */
   /* Clip unit pointer, with the same low "enable" bit set. */
   OUT_RELOC(brw->clip.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
   OUT_RELOC(brw->intel.batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->sf.state_offset);
   OUT_RELOC(brw->intel.batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->wm.state_offset);
   OUT_RELOC(brw->intel.batch.bo, I915_GEM_DOMAIN_INSTRUCTION, 0,
             brw->cc.state_offset);
   ADVANCE_BATCH();

   /* Signal downstream atoms that the pipelined state pointers moved. */
   brw->state.dirty.brw |= BRW_NEW_PSP;
}
163
164
/* Ensure the GS and clip unit-state BOs are on the validation list before
 * the batch referencing them is emitted.
 */
static void prepare_psp_urb_cbs(struct brw_context *brw)
{
   brw_add_validated_bo(brw, brw->gs.state_bo);
   brw_add_validated_bo(brw, brw->clip.state_bo);
}
170
/* Emit pipelined state pointers, then URB fence, then CS URB state —
 * emission order matters here.
 */
static void upload_psp_urb_cbs(struct brw_context *brw )
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_cs_urb_state(brw);
}
177
/* Re-emitted when any per-stage unit state or the URB layout changes,
 * or on a new batch (streamed state offsets become stale).
 */
const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_URB_FENCE | BRW_NEW_BATCH,
      .cache = (CACHE_NEW_VS_UNIT |
                CACHE_NEW_GS_UNIT |
                CACHE_NEW_GS_PROG |
                CACHE_NEW_CLIP_UNIT |
                CACHE_NEW_SF_UNIT |
                CACHE_NEW_WM_UNIT |
                CACHE_NEW_CC_UNIT)
   },
   .prepare = prepare_psp_urb_cbs,
   .emit = upload_psp_urb_cbs,
};
193
194 static void prepare_depthbuffer(struct brw_context *brw)
195 {
196 struct intel_region *region = brw->state.depth_region;
197
198 if (region != NULL)
199 brw_add_validated_bo(brw, region->buffer);
200 }
201
/* Emit 3DSTATE_DEPTH_BUFFER for the current depth region, or a null
 * depth buffer when none is bound.  Packet length varies by generation.
 */
static void emit_depthbuffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct intel_region *region = brw->state.depth_region;
   unsigned int len;

   /* The packet grew an extra DWord on g4x/gen5 and another on gen6. */
   if (intel->gen >= 6)
      len = 7;
   else if (intel->is_g4x || intel->gen == 5)
      len = 6;
   else
      len = 5;

   if (region == NULL) {
      /* No depth buffer: emit a null surface so depth/stencil tests are
       * well-defined without a real allocation.
       */
      BEGIN_BATCH(len);
      OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
                (BRW_SURFACE_NULL << 29));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);

      if (intel->is_g4x || intel->gen >= 5)
         OUT_BATCH(0);

      if (intel->gen >= 6)
         OUT_BATCH(0);

      ADVANCE_BATCH();
   } else {
      unsigned int format;

      /* Choose the hardware depth format from the region's bytes-per-pixel. */
      switch (region->cpp) {
      case 2:
         format = BRW_DEPTHFORMAT_D16_UNORM;
         break;
      case 4:
         if (intel->depth_buffer_is_float)
            format = BRW_DEPTHFORMAT_D32_FLOAT;
         else
            format = BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
         break;
      default:
         assert(0);
         return;
      }

      /* X-tiled depth is never valid; gen6+ additionally requires Y-tiling. */
      assert(region->tiling != I915_TILING_X);
      assert(intel->gen < 6 || region->tiling == I915_TILING_Y);

      BEGIN_BATCH(len);
      OUT_BATCH(_3DSTATE_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH(((region->pitch * region->cpp) - 1) | /* pitch in bytes - 1 */
                (format << 18) |
                (BRW_TILEWALK_YMAJOR << 26) |
                ((region->tiling != I915_TILING_NONE) << 27) |
                (BRW_SURFACE_2D << 29));
      OUT_RELOC(region->buffer,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                0);
      OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1) |
                ((region->width - 1) << 6) |
                ((region->height - 1) << 19));
      OUT_BATCH(0);

      if (intel->is_g4x || intel->gen >= 5)
         OUT_BATCH(0);

      if (intel->gen >= 6)
         OUT_BATCH(0);

      ADVANCE_BATCH();
   }

   /* Initialize it for safety. */
   if (intel->gen >= 6) {
      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_CLEAR_PARAMS << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}
284
/**
 * \see brw_context.state.depth_region
 *
 * Re-emitted when the depth region changes or on a new batch (the
 * relocation to region->buffer must be re-recorded).
 */
const struct brw_tracked_state brw_depthbuffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_DEPTH_BUFFER | BRW_NEW_BATCH,
      .cache = 0,
   },
   .prepare = prepare_depthbuffer,
   .emit = emit_depthbuffer,
};
297
298
299
/***********************************************************************
 * Polygon stipple packet
 */

/* Emit the 32x32 polygon stipple pattern, flipping it vertically when
 * drawing to a window so it matches our pixel layout.
 */
static void upload_polygon_stipple(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;  /* referenced by the batch macros */
   struct gl_context *ctx = &brw->intel.ctx;
   GLuint i;

   if (!ctx->Polygon.StippleFlag)
      return;

   BEGIN_BATCH(33); /* header + 32 pattern rows */
   OUT_BATCH(_3DSTATE_POLY_STIPPLE_PATTERN << 16 | (33 - 2));

   /* Polygon stipple is provided in OpenGL order, i.e. bottom
    * row first. If we're rendering to a window (i.e. the
    * default frame buffer object, 0), then we need to invert
    * it to match our pixel layout. But if we're rendering
    * to a FBO (i.e. any named frame buffer object), we *don't*
    * need to invert - we already match the layout.
    */
   if (ctx->DrawBuffer->Name == 0) {
      for (i = 0; i < 32; i++)
         OUT_BATCH(ctx->PolygonStipple[31 - i]); /* invert */
   }
   else {
      for (i = 0; i < 32; i++)
         OUT_BATCH(ctx->PolygonStipple[i]);
   }
   CACHED_BATCH();
}
333
/* Re-emitted when the stipple pattern changes or a new context starts. */
const struct brw_tracked_state brw_polygon_stipple = {
   .dirty = {
      .mesa = _NEW_POLYGONSTIPPLE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_polygon_stipple
};
342
343
344 /***********************************************************************
345 * Polygon stipple offset packet
346 */
347
348 static void upload_polygon_stipple_offset(struct brw_context *brw)
349 {
350 struct intel_context *intel = &brw->intel;
351 struct gl_context *ctx = &brw->intel.ctx;
352
353 if (!ctx->Polygon.StippleFlag)
354 return;
355
356 BEGIN_BATCH(2);
357 OUT_BATCH(_3DSTATE_POLY_STIPPLE_OFFSET << 16 | (2-2));
358
359 /* If we're drawing to a system window (ctx->DrawBuffer->Name == 0),
360 * we have to invert the Y axis in order to match the OpenGL
361 * pixel coordinate system, and our offset must be matched
362 * to the window position. If we're drawing to a FBO
363 * (ctx->DrawBuffer->Name != 0), then our native pixel coordinate
364 * system works just fine, and there's no window system to
365 * worry about.
366 */
367 if (brw->intel.ctx.DrawBuffer->Name == 0)
368 OUT_BATCH((32 - (ctx->DrawBuffer->Height & 31)) & 31);
369 else
370 OUT_BATCH(0);
371 CACHED_BATCH();
372 }
373
/* Fake Mesa state flag: core Mesa has no dirty bit for window position
 * changes, so the driver defines its own and raises it from the DRI layer.
 */
#define _NEW_WINDOW_POS 0x40000000

const struct brw_tracked_state brw_polygon_stipple_offset = {
   .dirty = {
      .mesa = _NEW_WINDOW_POS | _NEW_POLYGONSTIPPLE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_polygon_stipple_offset
};
384
385 /**********************************************************************
386 * AA Line parameters
387 */
388 static void upload_aa_line_parameters(struct brw_context *brw)
389 {
390 struct intel_context *intel = &brw->intel;
391 struct gl_context *ctx = &brw->intel.ctx;
392
393 if (!ctx->Line.SmoothFlag || !brw->has_aa_line_parameters)
394 return;
395
396 OUT_BATCH(_3DSTATE_AA_LINE_PARAMETERS << 16 | (3 - 2));
397 /* use legacy aa line coverage computation */
398 OUT_BATCH(0);
399 OUT_BATCH(0);
400 CACHED_BATCH();
401 }
402
/* Re-emitted when line smoothing state changes or a new context starts. */
const struct brw_tracked_state brw_aa_line_parameters = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_aa_line_parameters
};
411
/***********************************************************************
 * Line stipple packet
 */

/* Emit the line stipple pattern and repeat factor.  The hardware also
 * wants the inverse of the factor in 3.13 fixed point.
 */
static void upload_line_stipple(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;  /* referenced by the batch macros */
   struct gl_context *ctx = &brw->intel.ctx;
   GLfloat tmp;
   GLint tmpi;

   if (!ctx->Line.StippleFlag)
      return;

   BEGIN_BATCH(3);
   OUT_BATCH(_3DSTATE_LINE_STIPPLE_PATTERN << 16 | (3 - 2));
   OUT_BATCH(ctx->Line.StipplePattern);
   /* Convert 1/StippleFactor to fixed point with 13 fractional bits,
    * packed in the high half alongside the raw factor.
    */
   tmp = 1.0 / (GLfloat) ctx->Line.StippleFactor;
   tmpi = tmp * (1<<13);
   OUT_BATCH(tmpi << 16 | ctx->Line.StippleFactor);
   CACHED_BATCH();
}
434
/* Re-emitted when line state changes or a new context starts. */
const struct brw_tracked_state brw_line_stipple = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_line_stipple
};
443
444
/***********************************************************************
 * Misc invariant state packets
 */

/* Emit one-time state that never changes for the life of the context:
 * pipeline select, depth-offset clamp, gen6 multisample defaults,
 * system instruction pointer, and VF statistics enable.
 * (Note: "invarient" in the function name is a historical misspelling
 * kept for API stability.)
 */
static void upload_invarient_state( struct brw_context *brw )
{
   struct intel_context *intel = &brw->intel;

   {
      /* 0x61040000 Pipeline Select */
      /* PipelineSelect : 0 (3D pipeline, not media) */
      struct brw_pipeline_select ps;

      memset(&ps, 0, sizeof(ps));
      ps.header.opcode = brw->CMD_PIPELINE_SELECT;
      ps.header.pipeline_select = 0;
      BRW_BATCH_STRUCT(brw, &ps);
   }

   if (intel->gen < 6) {
      struct brw_global_depth_offset_clamp gdo;
      memset(&gdo, 0, sizeof(gdo));

      /* Disable depth offset clamping.
       */
      gdo.header.opcode = _3DSTATE_GLOBAL_DEPTH_OFFSET_CLAMP;
      gdo.header.length = sizeof(gdo)/4 - 2;
      gdo.depth_offset_clamp = 0.0;

      BRW_BATCH_STRUCT(brw, &gdo);
   }

   if (intel->gen >= 6) {
      int i;

      /* Single-sampled rendering with pixel-center sample locations. */
      BEGIN_BATCH(3);
      OUT_BATCH(_3DSTATE_MULTISAMPLE << 16 | (3 - 2));
      OUT_BATCH(MS_PIXEL_LOCATION_CENTER |
                MS_NUMSAMPLES_1);
      OUT_BATCH(0); /* positions for 4/8-sample */
      ADVANCE_BATCH();

      /* Enable only sample 0 in the sample mask. */
      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_SAMPLE_MASK << 16 | (2 - 2));
      OUT_BATCH(1);
      ADVANCE_BATCH();

      /* Initialize all four streamed-vertex-buffer indices with a
       * maximal upper bound.
       */
      for (i = 0; i < 4; i++) {
         BEGIN_BATCH(4);
         OUT_BATCH(_3DSTATE_GS_SVB_INDEX << 16 | (4 - 2));
         OUT_BATCH(i << SVB_INDEX_SHIFT);
         OUT_BATCH(0);
         OUT_BATCH(0xffffffff);
         ADVANCE_BATCH();
      }
   }

   /* 0x61020000 State Instruction Pointer */
   {
      struct brw_system_instruction_pointer sip;
      memset(&sip, 0, sizeof(sip));

      sip.header.opcode = CMD_STATE_INSN_POINTER;
      sip.header.length = 0;
      sip.bits0.pad = 0;
      sip.bits0.system_instruction_pointer = 0;
      BRW_BATCH_STRUCT(brw, &sip);
   }


   {
      struct brw_vf_statistics vfs;
      memset(&vfs, 0, sizeof(vfs));

      vfs.opcode = brw->CMD_VF_STATISTICS;
      /* Vertex-fetch statistics only when stats debugging is requested. */
      if (unlikely(INTEL_DEBUG & DEBUG_STATS))
         vfs.statistics_enable = 1;

      BRW_BATCH_STRUCT(brw, &vfs);
   }
}
526
/* Emitted once per context; nothing else dirties this atom. */
const struct brw_tracked_state brw_invarient_state = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_invarient_state
};
535
/**
 * Define the base addresses which some state is referenced from.
 *
 * This allows us to avoid having to emit relocations for the objects,
 * and is actually required for binding table pointers on gen6.
 *
 * Surface state base address covers binding table pointers and
 * surface state objects, but not the surfaces that the surface state
 * objects point to.
 *
 * NOTE(review): a literal 1 emitted below appears to mean "address 0 with
 * the modify-enable bit set" per the STATE_BASE_ADDRESS DWord layout —
 * confirm against the PRM.
 */
static void upload_state_base_address( struct brw_context *brw )
{
   struct intel_context *intel = &brw->intel;

   if (intel->gen >= 6) {
      BEGIN_BATCH(10);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (10 - 2));
      /* General state base address: stateless DP read/write requests */
      OUT_BATCH(1);
      /* Surface state base address:
       * BINDING_TABLE_STATE
       * SURFACE_STATE
       */
      OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0, 1);
      /* Dynamic state base address:
       * SAMPLER_STATE
       * SAMPLER_BORDER_COLOR_STATE
       * CLIP, SF, WM/CC viewport state
       * COLOR_CALC_STATE
       * DEPTH_STENCIL_STATE
       * BLEND_STATE
       * Push constants (when INSTPM: CONSTANT_BUFFER Address Offset
       * Disable is clear, which we rely on)
       */
      OUT_RELOC(intel->batch.bo, (I915_GEM_DOMAIN_RENDER |
                                  I915_GEM_DOMAIN_INSTRUCTION), 0, 1);

      OUT_BATCH(1); /* Indirect object base address: MEDIA_OBJECT data */
      OUT_BATCH(1); /* Instruction base address: shader kernels (incl. SIP) */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Dynamic state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else if (intel->gen == 5) {
      BEGIN_BATCH(8);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
                1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* Instruction base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(6);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_RELOC(intel->batch.bo, I915_GEM_DOMAIN_SAMPLER, 0,
                1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      ADVANCE_BATCH();
   }
}
604
/* Re-emitted on every new batch: the surface/dynamic state bases point
 * into the batchbuffer itself.
 */
const struct brw_tracked_state brw_state_base_address = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH,
      .cache = 0,
   },
   .emit = upload_state_base_address
};