/*
 * i965g: more work on compiling
 * [mesa.git] src/gallium/drivers/i965/brw_misc_state.c
 */
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33
34 #include "brw_batchbuffer.h"
35 #include "brw_context.h"
36 #include "brw_state.h"
37 #include "brw_defines.h"
38
39
40
41
42
43 /***********************************************************************
44 * Blend color
45 */
46
/**
 * Emit the blend-constant-color packet carrying the current blend color.
 *
 * NOTE(review): `ctx` (a GL context) is not declared anywhere in this
 * function or file -- this is part of the in-progress gallium port and
 * does not compile yet.  The blend color presumably needs to come from
 * gallium state stored on `brw`; confirm against the rest of the i965g
 * port.
 */
static void upload_blend_constant_color(struct brw_context *brw)
{
   struct brw_blend_constant_color bcc;

   /* Clear the whole struct (padding included) so the cached-batch
    * emit below sees deterministic bytes -- presumably it compares
    * against the previously emitted copy.
    */
   memset(&bcc, 0, sizeof(bcc));
   bcc.header.opcode = CMD_BLEND_CONSTANT_COLOR;
   bcc.header.length = sizeof(bcc)/4-2;   /* packet length in dwords, minus 2 */
   bcc.blend_constant_color[0] = ctx->Color.BlendColor[0];
   bcc.blend_constant_color[1] = ctx->Color.BlendColor[1];
   bcc.blend_constant_color[2] = ctx->Color.BlendColor[2];
   bcc.blend_constant_color[3] = ctx->Color.BlendColor[3];

   BRW_CACHED_BATCH_STRUCT(brw, &bcc);
}
61
62
63 const struct brw_tracked_state brw_blend_constant_color = {
64 .dirty = {
65 .mesa = _NEW_COLOR,
66 .brw = 0,
67 .cache = 0
68 },
69 .emit = upload_blend_constant_color
70 };
71
72 /* Constant single cliprect for framebuffer object or DRI2 drawing */
73 static void upload_drawing_rect(struct brw_context *brw)
74 {
75 BEGIN_BATCH(4, NO_LOOP_CLIPRECTS);
76 OUT_BATCH(_3DSTATE_DRAWRECT_INFO_I965);
77 OUT_BATCH(0);
78 OUT_BATCH(((brw->fb.width - 1) & 0xffff) |
79 ((brw->fb.height - 1) << 16));
80 OUT_BATCH(0);
81 ADVANCE_BATCH();
82 }
83
84 const struct brw_tracked_state brw_drawing_rect = {
85 .dirty = {
86 .mesa = _NEW_BUFFERS,
87 .brw = 0,
88 .cache = 0
89 },
90 .emit = upload_drawing_rect
91 };
92
/* Put the VS and WM binding-table BOs on the validation list before the
 * batch that references them is emitted.
 */
static void prepare_binding_table_pointers(struct brw_context *brw)
{
   brw_add_validated_bo(brw, brw->vs.bind_bo);
   brw_add_validated_bo(brw, brw->wm.bind_bo);
}
98
/**
 * Upload the binding table pointers, which point each stage's array of surface
 * state pointers.
 *
 * The binding table pointers are relative to the surface state base address,
 * which is 0.
 */
static void upload_binding_table_pointers(struct brw_context *brw)
{
   BEGIN_BATCH(6, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_BINDING_TABLE_PTRS << 16 | (6 - 2));
   /* The VS may legitimately have no binding table.  NOTE(review): the
    * WM pointer below is emitted unconditionally -- if wm.bind_bo can
    * also be NULL it needs the same guard; confirm with callers.
    */
   if (brw->vs.bind_bo != NULL)
      OUT_RELOC(brw->vs.bind_bo, I915_GEM_DOMAIN_SAMPLER, 0, 0); /* vs */
   else
      OUT_BATCH(0);
   OUT_BATCH(0); /* gs */
   OUT_BATCH(0); /* clip */
   OUT_BATCH(0); /* sf */
   OUT_RELOC(brw->wm.bind_bo, I915_GEM_DOMAIN_SAMPLER, 0, 0); /* wm/ps */
   ADVANCE_BATCH();
}
120
121 const struct brw_tracked_state brw_binding_table_pointers = {
122 .dirty = {
123 .mesa = 0,
124 .brw = BRW_NEW_BATCH,
125 .cache = CACHE_NEW_SURF_BIND,
126 },
127 .prepare = prepare_binding_table_pointers,
128 .emit = upload_binding_table_pointers,
129 };
130
131
/**
 * Upload pointers to the per-stage state.
 *
 * The state pointers in this packet are all relative to the general state
 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
 */
static void upload_pipelined_state_pointers(struct brw_context *brw )
{
   BEGIN_BATCH(7, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_PIPELINED_STATE_POINTERS << 16 | (7 - 2));
   OUT_RELOC(brw->vs.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   /* GS is only pointed at when a GS program is active; the low bit set
    * in its delta (and in clip's, below) is presumably the unit-enable
    * bit -- TODO confirm against the PRM.
    */
   if (brw->gs.prog_active)
      OUT_RELOC(brw->gs.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
   else
      OUT_BATCH(0);
   OUT_RELOC(brw->clip.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
   OUT_RELOC(brw->sf.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   OUT_RELOC(brw->wm.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   OUT_RELOC(brw->cc.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   ADVANCE_BATCH();

   /* Let dependent state know the pipelined state pointers were re-emitted. */
   brw->state.dirty.brw |= BRW_NEW_PSP;
}
155
156
/* Validate every per-stage unit state BO that the pipelined state
 * pointers packet will reference.
 */
static void prepare_psp_urb_cbs(struct brw_context *brw)
{
   brw_add_validated_bo(brw, brw->vs.state_bo);
   brw_add_validated_bo(brw, brw->gs.state_bo);
   brw_add_validated_bo(brw, brw->clip.state_bo);
   brw_add_validated_bo(brw, brw->sf.state_bo);
   brw_add_validated_bo(brw, brw->wm.state_bo);
   brw_add_validated_bo(brw, brw->cc.state_bo);
}
166
/* Emit the pipelined state pointers, then the URB fence and the
 * constant-buffer URB state, in that order.
 */
static void upload_psp_urb_cbs(struct brw_context *brw )
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_cs_urb_state(brw);
}
173
174 const struct brw_tracked_state brw_psp_urb_cbs = {
175 .dirty = {
176 .mesa = 0,
177 .brw = BRW_NEW_URB_FENCE | BRW_NEW_BATCH,
178 .cache = (CACHE_NEW_VS_UNIT |
179 CACHE_NEW_GS_UNIT |
180 CACHE_NEW_GS_PROG |
181 CACHE_NEW_CLIP_UNIT |
182 CACHE_NEW_SF_UNIT |
183 CACHE_NEW_WM_UNIT |
184 CACHE_NEW_CC_UNIT)
185 },
186 .prepare = prepare_psp_urb_cbs,
187 .emit = upload_psp_urb_cbs,
188 };
189
190 static void prepare_depthbuffer(struct brw_context *brw)
191 {
192 struct intel_region *region = brw->state.depth_region;
193
194 if (region != NULL)
195 brw_add_validated_bo(brw, region->buffer);
196 }
197
/* Emit the depth-buffer packet for the bound depth region, or a null
 * depth surface when none is bound.
 *
 * NOTE(review): `intel` is not declared anywhere in this function or
 * file (see the depth_buffer_is_float test below) -- a leftover from
 * the classic driver; this does not compile yet in the gallium port.
 */
static void emit_depthbuffer(struct brw_context *brw)
{
   struct intel_region *region = brw->state.depth_region;
   /* G4X and IGDNG take one extra dword in this packet. */
   unsigned int len = (BRW_IS_G4X(brw) || BRW_IS_IGDNG(brw)) ? 6 : 5;

   if (region == NULL) {
      /* No depth region bound: program a null depth surface. */
      BEGIN_BATCH(len, IGNORE_CLIPRECTS);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
                (BRW_SURFACE_NULL << 29));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);

      if (BRW_IS_G4X(brw) || BRW_IS_IGDNG(brw))
         OUT_BATCH(0);

      ADVANCE_BATCH();
   } else {
      unsigned int format;

      /* Derive the hardware depth format from bytes-per-pixel. */
      switch (region->cpp) {
      case 2:
         format = BRW_DEPTHFORMAT_D16_UNORM;
         break;
      case 4:
         if (intel->depth_buffer_is_float)
            format = BRW_DEPTHFORMAT_D32_FLOAT;
         else
            format = BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
         break;
      default:
         assert(0);
         return;
      }

      /* Only untiled or Y-tiled regions are handled: the packet below
       * hardwires a Y-major tile walk.
       */
      assert(region->tiling != I915_TILING_X);

      BEGIN_BATCH(len, IGNORE_CLIPRECTS);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
      /* Pitch field is in bytes minus one; region->pitch appears to be
       * in pixels here (hence the * cpp) -- TODO confirm units.
       */
      OUT_BATCH(((region->pitch * region->cpp) - 1) |
                (format << 18) |
                (BRW_TILEWALK_YMAJOR << 26) |
                ((region->tiling != I915_TILING_NONE) << 27) |
                (BRW_SURFACE_2D << 29));
      OUT_RELOC(region->buffer,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                0);
      /* Width/height fields are programmed as (pixels - 1). */
      OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1) |
                ((region->pitch - 1) << 6) |
                ((region->height - 1) << 19));
      OUT_BATCH(0);

      if (BRW_IS_G4X(brw) || BRW_IS_IGDNG(brw))
         OUT_BATCH(0);

      ADVANCE_BATCH();
   }
}
257
258 const struct brw_tracked_state brw_depthbuffer = {
259 .dirty = {
260 .mesa = 0,
261 .brw = BRW_NEW_DEPTH_BUFFER | BRW_NEW_BATCH,
262 .cache = 0,
263 },
264 .prepare = prepare_depthbuffer,
265 .emit = emit_depthbuffer,
266 };
267
268
269
270 /***********************************************************************
271 * Polygon stipple packet
272 */
273
/* Emit the 32x32 polygon stipple pattern packet.
 *
 * NOTE(review): `ctx` (a GL context) is not declared anywhere in this
 * function or file -- in-progress gallium port, does not compile yet.
 * The pattern presumably needs to come from gallium state on `brw`.
 */
static void upload_polygon_stipple(struct brw_context *brw)
{
   struct brw_polygon_stipple bps;
   GLuint i;

   memset(&bps, 0, sizeof(bps));
   bps.header.opcode = CMD_POLY_STIPPLE_PATTERN;
   bps.header.length = sizeof(bps)/4-2;   /* packet length in dwords, minus 2 */

   /* Polygon stipple is provided in OpenGL order, i.e. bottom
    * row first. If we're rendering to a window (i.e. the
    * default frame buffer object, 0), then we need to invert
    * it to match our pixel layout. But if we're rendering
    * to a FBO (i.e. any named frame buffer object), we *don't*
    * need to invert - we already match the layout.
    */
   if (ctx->DrawBuffer->Name == 0) {
      for (i = 0; i < 32; i++)
         bps.stipple[i] = ctx->PolygonStipple[31 - i]; /* invert */
   }
   else {
      for (i = 0; i < 32; i++)
         bps.stipple[i] = ctx->PolygonStipple[i]; /* don't invert */
   }

   BRW_CACHED_BATCH_STRUCT(brw, &bps);
}
301
302 const struct brw_tracked_state brw_polygon_stipple = {
303 .dirty = {
304 .mesa = _NEW_POLYGONSTIPPLE,
305 .brw = 0,
306 .cache = 0
307 },
308 .emit = upload_polygon_stipple
309 };
310
311
/***********************************************************************
 * Polygon stipple offset packet
 */

/* Emit the polygon stipple x/y offset packet.
 *
 * NOTE(review): this still reaches into DRI1-era state
 * (__DRIdrawablePrivate, brw->intel.driDrawable, brw->intel.ctx) that a
 * gallium driver should not use -- part of the in-progress port.
 * Confirm where the window origin should come from in the i965g world.
 */
static void upload_polygon_stipple_offset(struct brw_context *brw)
{
   __DRIdrawablePrivate *dPriv = brw->intel.driDrawable;
   struct brw_polygon_stipple_offset bpso;

   memset(&bpso, 0, sizeof(bpso));
   bpso.header.opcode = CMD_POLY_STIPPLE_OFFSET;
   bpso.header.length = sizeof(bpso)/4-2;

   /* If we're drawing to a system window (ctx->DrawBuffer->Name == 0),
    * we have to invert the Y axis in order to match the OpenGL
    * pixel coordinate system, and our offset must be matched
    * to the window position. If we're drawing to a FBO
    * (ctx->DrawBuffer->Name != 0), then our native pixel coordinate
    * system works just fine, and there's no window system to
    * worry about.
    */
   if (brw->intel.ctx.DrawBuffer->Name == 0) {
      /* Offsets are mod-32 window coordinates of the drawable origin. */
      bpso.bits0.x_offset = (32 - (dPriv->x & 31)) & 31;
      bpso.bits0.y_offset = (32 - ((dPriv->y + dPriv->h) & 31)) & 31;
   }
   else {
      bpso.bits0.y_offset = 0;
      bpso.bits0.x_offset = 0;
   }

   BRW_CACHED_BATCH_STRUCT(brw, &bpso);
}
344
345 #define _NEW_WINDOW_POS 0x40000000
346
347 const struct brw_tracked_state brw_polygon_stipple_offset = {
348 .dirty = {
349 .mesa = _NEW_WINDOW_POS,
350 .brw = 0,
351 .cache = 0
352 },
353 .emit = upload_polygon_stipple_offset
354 };
355
356 /**********************************************************************
357 * AA Line parameters
358 */
359 static void upload_aa_line_parameters(struct brw_context *brw)
360 {
361 struct brw_aa_line_parameters balp;
362
363 if (BRW_IS_965(brw))
364 return;
365
366 /* use legacy aa line coverage computation */
367 memset(&balp, 0, sizeof(balp));
368 balp.header.opcode = CMD_AA_LINE_PARAMETERS;
369 balp.header.length = sizeof(balp) / 4 - 2;
370
371 BRW_CACHED_BATCH_STRUCT(brw, &balp);
372 }
373
374 const struct brw_tracked_state brw_aa_line_parameters = {
375 .dirty = {
376 .mesa = 0,
377 .brw = BRW_NEW_CONTEXT,
378 .cache = 0
379 },
380 .emit = upload_aa_line_parameters
381 };
382
/***********************************************************************
 * Line stipple packet
 */

/* Emit the line stipple pattern/repeat-count packet from GL line state.
 *
 * NOTE(review): `ctx` is not declared anywhere in this function or file
 * -- in-progress gallium port, does not compile yet.  Pattern and factor
 * presumably need to come from gallium rasterizer state on `brw`.
 */
static void upload_line_stipple(struct brw_context *brw)
{
   struct brw_line_stipple bls;
   GLfloat tmp;
   GLint tmpi;

   memset(&bls, 0, sizeof(bls));
   bls.header.opcode = CMD_LINE_STIPPLE_PATTERN;
   bls.header.length = sizeof(bls)/4 - 2;

   bls.bits0.pattern = ctx->Line.StipplePattern;
   bls.bits1.repeat_count = ctx->Line.StippleFactor;

   /* Inverse repeat count scaled by 2^13 -- presumably a fixed-point
    * field with 13 fractional bits; TODO confirm against the PRM.
    */
   tmp = 1.0 / (GLfloat) ctx->Line.StippleFactor;
   tmpi = tmp * (1<<13);


   bls.bits1.inverse_repeat_count = tmpi;

   BRW_CACHED_BATCH_STRUCT(brw, &bls);
}
408
409 const struct brw_tracked_state brw_line_stipple = {
410 .dirty = {
411 .mesa = _NEW_LINE,
412 .brw = 0,
413 .cache = 0
414 },
415 .emit = upload_line_stipple
416 };
417
418
419 /***********************************************************************
420 * Misc invarient state packets
421 */
422
423 static void upload_invarient_state( struct brw_context *brw )
424 {
425 {
426 /* 0x61040000 Pipeline Select */
427 /* PipelineSelect : 0 */
428 struct brw_pipeline_select ps;
429
430 memset(&ps, 0, sizeof(ps));
431 ps.header.opcode = CMD_PIPELINE_SELECT(brw);
432 ps.header.pipeline_select = 0;
433 BRW_BATCH_STRUCT(brw, &ps);
434 }
435
436 {
437 struct brw_global_depth_offset_clamp gdo;
438 memset(&gdo, 0, sizeof(gdo));
439
440 /* Disable depth offset clamping.
441 */
442 gdo.header.opcode = CMD_GLOBAL_DEPTH_OFFSET_CLAMP;
443 gdo.header.length = sizeof(gdo)/4 - 2;
444 gdo.depth_offset_clamp = 0.0;
445
446 BRW_BATCH_STRUCT(brw, &gdo);
447 }
448
449
450 /* 0x61020000 State Instruction Pointer */
451 {
452 struct brw_system_instruction_pointer sip;
453 memset(&sip, 0, sizeof(sip));
454
455 sip.header.opcode = CMD_STATE_INSN_POINTER;
456 sip.header.length = 0;
457 sip.bits0.pad = 0;
458 sip.bits0.system_instruction_pointer = 0;
459 BRW_BATCH_STRUCT(brw, &sip);
460 }
461
462
463 {
464 struct brw_vf_statistics vfs;
465 memset(&vfs, 0, sizeof(vfs));
466
467 vfs.opcode = CMD_VF_STATISTICS(brw);
468 if (INTEL_DEBUG & DEBUG_STATS)
469 vfs.statistics_enable = 1;
470
471 BRW_BATCH_STRUCT(brw, &vfs);
472 }
473 }
474
475 const struct brw_tracked_state brw_invarient_state = {
476 .dirty = {
477 .mesa = 0,
478 .brw = BRW_NEW_CONTEXT,
479 .cache = 0
480 },
481 .emit = upload_invarient_state
482 };
483
484 /**
485 * Define the base addresses which some state is referenced from.
486 *
487 * This allows us to avoid having to emit relocations in many places for
488 * cached state, and instead emit pointers inside of large, mostly-static
489 * state pools. This comes at the expense of memory, and more expensive cache
490 * misses.
491 */
492 static void upload_state_base_address( struct brw_context *brw )
493 {
494 /* Output the structure (brw_state_base_address) directly to the
495 * batchbuffer, so we can emit relocations inline.
496 */
497 if (BRW_IS_IGDNG(brw)) {
498 BEGIN_BATCH(8, IGNORE_CLIPRECTS);
499 OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
500 OUT_BATCH(1); /* General state base address */
501 OUT_BATCH(1); /* Surface state base address */
502 OUT_BATCH(1); /* Indirect object base address */
503 OUT_BATCH(1); /* Instruction base address */
504 OUT_BATCH(1); /* General state upper bound */
505 OUT_BATCH(1); /* Indirect object upper bound */
506 OUT_BATCH(1); /* Instruction access upper bound */
507 ADVANCE_BATCH();
508 } else {
509 BEGIN_BATCH(6, IGNORE_CLIPRECTS);
510 OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
511 OUT_BATCH(1); /* General state base address */
512 OUT_BATCH(1); /* Surface state base address */
513 OUT_BATCH(1); /* Indirect object base address */
514 OUT_BATCH(1); /* General state upper bound */
515 OUT_BATCH(1); /* Indirect object upper bound */
516 ADVANCE_BATCH();
517 }
518 }
519
520 const struct brw_tracked_state brw_state_base_address = {
521 .dirty = {
522 .mesa = 0,
523 .brw = BRW_NEW_CONTEXT,
524 .cache = 0,
525 },
526 .emit = upload_state_base_address
527 };