Merge branch 'master' into autoconf2
[mesa.git] / src / mesa / drivers / dri / i965 / brw_misc_state.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33
34 #include "intel_batchbuffer.h"
35 #include "intel_regions.h"
36
37 #include "brw_context.h"
38 #include "brw_state.h"
39 #include "brw_defines.h"
40
41
42
43
44
45 /***********************************************************************
46 * Blend color
47 */
48
49 static void upload_blend_constant_color(struct brw_context *brw)
50 {
51 struct brw_blend_constant_color bcc;
52
53 memset(&bcc, 0, sizeof(bcc));
54 bcc.header.opcode = CMD_BLEND_CONSTANT_COLOR;
55 bcc.header.length = sizeof(bcc)/4-2;
56 bcc.blend_constant_color[0] = brw->attribs.Color->BlendColor[0];
57 bcc.blend_constant_color[1] = brw->attribs.Color->BlendColor[1];
58 bcc.blend_constant_color[2] = brw->attribs.Color->BlendColor[2];
59 bcc.blend_constant_color[3] = brw->attribs.Color->BlendColor[3];
60
61 BRW_CACHED_BATCH_STRUCT(brw, &bcc);
62 }
63
64
/* Re-emit the blend constant color packet whenever GL color state
 * (_NEW_COLOR) changes.
 */
const struct brw_tracked_state brw_blend_constant_color = {
   .dirty = {
      .mesa = _NEW_COLOR,
      .brw = 0,
      .cache = 0
   },
   .update = upload_blend_constant_color
};
73
74 /***********************************************************************
75 * Drawing rectangle -- Need for AUB file only.
76 */
/* Emit the CMD_DRAW_RECT packet for the single-cliprect case, clamping
 * the cliprect to the screen and offsetting by the window origin.
 */
static void upload_drawing_rect(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   __DRIdrawablePrivate *dPriv = intel->driDrawable;
   struct brw_drawrect bdr;
   int x1, y1;
   int x2, y2;

   /* If there is a single cliprect, set it here.  Otherwise iterate
    * over them in brw_draw_prim().
    */
   if (brw->intel.numClipRects > 1)
      return;

   /* NOTE(review): pClipRects[0] is read unconditionally below; this
    * assumes at least one cliprect exists whenever we get here (i.e.
    * numClipRects != 0) -- confirm against the callers.
    */
   x1 = brw->intel.pClipRects[0].x1;
   y1 = brw->intel.pClipRects[0].y1;
   x2 = brw->intel.pClipRects[0].x2;
   y2 = brw->intel.pClipRects[0].y2;

   /* Clamp the cliprect to the screen bounds. */
   if (x1 < 0) x1 = 0;
   if (y1 < 0) y1 = 0;
   if (x2 > intel->intelScreen->width) x2 = intel->intelScreen->width;
   if (y2 > intel->intelScreen->height) y2 = intel->intelScreen->height;

   memset(&bdr, 0, sizeof(bdr));
   bdr.header.opcode = CMD_DRAW_RECT;
   bdr.header.length = sizeof(bdr)/4 - 2;
   bdr.xmin = x1;
   bdr.ymin = y1;
   bdr.xmax = x2;
   bdr.ymax = y2;
   /* Drawing-rectangle origin is the window's position on the screen. */
   bdr.xorg = dPriv->x;
   bdr.yorg = dPriv->y;

   /* Can't use BRW_CACHED_BATCH_STRUCT because this is also emitted
    * uncached in brw_draw.c:
    */
   BRW_BATCH_STRUCT(brw, &bdr);
}
116
/* The drawing rectangle depends only on the window position
 * (_NEW_WINDOW_POS).
 */
const struct brw_tracked_state brw_drawing_rect = {
   .dirty = {
      .mesa = _NEW_WINDOW_POS,
      .brw = 0,
      .cache = 0
   },
   .update = upload_drawing_rect
};
125
126 /**
127 * Upload the binding table pointers, which point each stage's array of surface
128 * state pointers.
129 *
130 * The binding table pointers are relative to the surface state base address,
131 * which is the BRW_SS_POOL cache buffer.
132 */
133 static void upload_binding_table_pointers(struct brw_context *brw)
134 {
135 struct brw_binding_table_pointers btp;
136 memset(&btp, 0, sizeof(btp));
137
138 btp.header.opcode = CMD_BINDING_TABLE_PTRS;
139 btp.header.length = sizeof(btp)/4 - 2;
140 btp.vs = 0;
141 btp.gs = 0;
142 btp.clp = 0;
143 btp.sf = 0;
144 btp.wm = brw->wm.bind_ss_offset;
145
146 BRW_CACHED_BATCH_STRUCT(brw, &btp);
147 }
148
/* Re-emit whenever new surface state bindings have been uploaded. */
const struct brw_tracked_state brw_binding_table_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = 0,
      .cache = CACHE_NEW_SURF_BIND
   },
   .update = upload_binding_table_pointers
};
157
158
/**
 * Upload pointers to the per-stage state.
 *
 * The state pointers in this packet are all relative to the general state
 * base address set by CMD_STATE_BASE_ADDRESS, which is the BRW_GS_POOL buffer.
 */
static void upload_pipelined_state_pointers(struct brw_context *brw )
{
   struct brw_pipelined_state_pointers psp;
   memset(&psp, 0, sizeof(psp));

   psp.header.opcode = CMD_PIPELINED_STATE_POINTERS;
   psp.header.length = sizeof(psp)/4 - 2;

   /* Unit-state offsets are stored in 32-byte units, hence the >> 5. */
   psp.vs.offset = brw->vs.state_gs_offset >> 5;
   psp.sf.offset = brw->sf.state_gs_offset >> 5;
   psp.wm.offset = brw->wm.state_gs_offset >> 5;
   psp.cc.offset = brw->cc.state_gs_offset >> 5;

   /* GS gets turned on and off regularly.  Need to re-emit URB fence
    * after this occurs.
    */
   if (brw->gs.prog_active) {
      psp.gs.offset = brw->gs.state_gs_offset >> 5;
      psp.gs.enable = 1;
   }

   /* The clipper is bypassed while metaops are active. */
   if (!brw->metaops.active) {
      psp.clp.offset = brw->clip.state_gs_offset >> 5;
      psp.clp.enable = 1;
   }

   /* BRW_CACHED_BATCH_STRUCT returns nonzero when the packet was actually
    * emitted (i.e. it differed from the cached copy); flag dependent state.
    */
   if (BRW_CACHED_BATCH_STRUCT(brw, &psp))
      brw->state.dirty.brw |= BRW_NEW_PSP;
}
195
/* Re-emit when any per-stage unit state (or the metaops flag) changes,
 * since the packet carries pointers into all of them.
 */
const struct brw_tracked_state brw_pipelined_state_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_METAOPS,
      .cache = (CACHE_NEW_VS_UNIT |
		CACHE_NEW_GS_UNIT |
		CACHE_NEW_GS_PROG |
		CACHE_NEW_CLIP_UNIT |
		CACHE_NEW_SF_UNIT |
		CACHE_NEW_WM_UNIT |
		CACHE_NEW_CC_UNIT)
   },
   .update = upload_pipelined_state_pointers
};
210
/* Emit the pipelined state pointers, then the URB fence, then the constant
 * buffer state, in that order (the URB fence must be re-emitted after the
 * PSP packet -- see the note in upload_pipelined_state_pointers).
 */
static void upload_psp_urb_cbs(struct brw_context *brw )
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_constant_buffer_state(brw);
}
217
218
/* Combined PSP + URB fence + constant buffer emission; tracks the union of
 * the dependencies of its three components.
 */
const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_URB_FENCE | BRW_NEW_METAOPS,
      .cache = (CACHE_NEW_VS_UNIT |
		CACHE_NEW_GS_UNIT |
		CACHE_NEW_GS_PROG |
		CACHE_NEW_CLIP_UNIT |
		CACHE_NEW_SF_UNIT |
		CACHE_NEW_WM_UNIT |
		CACHE_NEW_CC_UNIT)
   },
   .update = upload_psp_urb_cbs
};
233
/**
 * Upload the depthbuffer offset and format.
 *
 * We have to do this per state validation as we need to emit the relocation
 * in the batch buffer.
 */
static void upload_depthbuffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct intel_region *region = brw->state.depth_region;

   unsigned int format;

   /* Pick the hardware depth format from the region's bytes per pixel. */
   switch (region->cpp) {
   case 2:
      format = BRW_DEPTHFORMAT_D16_UNORM;
      break;
   case 4:
      if (intel->depth_buffer_is_float)
	 format = BRW_DEPTHFORMAT_D32_FLOAT;
      else
	 format = BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
      break;
   default:
      /* Unsupported depth cpp: emit nothing rather than a bogus packet. */
      assert(0);
      return;
   }

   /* 5-dword CMD_DEPTH_BUFFER packet, emitted directly (not through the
    * cached-batch path) because dword 2 carries a buffer relocation.
    */
   BEGIN_BATCH(5, INTEL_BATCH_NO_CLIPRECTS);
   OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (5 - 2));
   /* Surface pitch in bytes minus one, plus format/tiling/type bits. */
   OUT_BATCH(((region->pitch * region->cpp) - 1) |
	     (format << 18) |
	     (BRW_TILEWALK_YMAJOR << 26) |
	     (region->tiled << 27) |
	     (BRW_SURFACE_2D << 29));
   OUT_RELOC(region->buffer,
	     DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ | DRM_BO_FLAG_WRITE, 0);
   /* Dimensions: pitch-1 and height-1, plus mipmap layout mode. */
   OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1) |
	     ((region->pitch - 1) << 6) |
	     ((region->height - 1) << 19));
   OUT_BATCH(0);
   ADVANCE_BATCH();
}
277
/* Emitted on every state validation (always_update) because the relocation
 * must be re-emitted into each new batch buffer.
 */
const struct brw_tracked_state brw_depthbuffer = {
   .update = upload_depthbuffer,
   .always_update = GL_TRUE,
};
282
283
284
285 /***********************************************************************
286 * Polygon stipple packet
287 */
288
289 static void upload_polygon_stipple(struct brw_context *brw)
290 {
291 struct brw_polygon_stipple bps;
292 GLuint i;
293
294 memset(&bps, 0, sizeof(bps));
295 bps.header.opcode = CMD_POLY_STIPPLE_PATTERN;
296 bps.header.length = sizeof(bps)/4-2;
297
298 for (i = 0; i < 32; i++)
299 bps.stipple[i] = brw->attribs.PolygonStipple[31 - i]; /* invert */
300
301 BRW_CACHED_BATCH_STRUCT(brw, &bps);
302 }
303
/* Re-emit the stipple pattern when GL polygon stipple state changes. */
const struct brw_tracked_state brw_polygon_stipple = {
   .dirty = {
      .mesa = _NEW_POLYGONSTIPPLE,
      .brw = 0,
      .cache = 0
   },
   .update = upload_polygon_stipple
};
312
313
314 /***********************************************************************
315 * Polygon stipple offset packet
316 */
317
318 static void upload_polygon_stipple_offset(struct brw_context *brw)
319 {
320 __DRIdrawablePrivate *dPriv = brw->intel.driDrawable;
321 struct brw_polygon_stipple_offset bpso;
322
323 memset(&bpso, 0, sizeof(bpso));
324 bpso.header.opcode = CMD_POLY_STIPPLE_OFFSET;
325 bpso.header.length = sizeof(bpso)/4-2;
326
327 bpso.bits0.x_offset = (32 - (dPriv->x & 31)) & 31;
328 bpso.bits0.y_offset = (32 - ((dPriv->y + dPriv->h) & 31)) & 31;
329
330 BRW_CACHED_BATCH_STRUCT(brw, &bpso);
331 }
332
/* The stipple offset tracks the window position (_NEW_WINDOW_POS). */
const struct brw_tracked_state brw_polygon_stipple_offset = {
   .dirty = {
      .mesa = _NEW_WINDOW_POS,
      .brw = 0,
      .cache = 0
   },
   .update = upload_polygon_stipple_offset
};
341
342 /***********************************************************************
343 * Line stipple packet
344 */
345
/* Emit the line stipple pattern, repeat count, and its fixed-point
 * reciprocal.
 */
static void upload_line_stipple(struct brw_context *brw)
{
   struct brw_line_stipple bls;
   GLfloat tmp;
   GLint tmpi;

   memset(&bls, 0, sizeof(bls));
   bls.header.opcode = CMD_LINE_STIPPLE_PATTERN;
   bls.header.length = sizeof(bls)/4 - 2;

   bls.bits0.pattern = brw->attribs.Line->StipplePattern;
   bls.bits1.repeat_count = brw->attribs.Line->StippleFactor;

   /* Hardware wants the reciprocal of the repeat count as fixed point;
    * the 1<<13 scale suggests a 13-bit fractional field -- confirm
    * against the hardware documentation.
    */
   tmp = 1.0 / (GLfloat) brw->attribs.Line->StippleFactor;
   tmpi = tmp * (1<<13);


   bls.bits1.inverse_repeat_count = tmpi;

   BRW_CACHED_BATCH_STRUCT(brw, &bls);
}
367
/* Re-emit whenever GL line state (_NEW_LINE) changes. */
const struct brw_tracked_state brw_line_stipple = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = 0,
      .cache = 0
   },
   .update = upload_line_stipple
};
376
377
378
379 /***********************************************************************
380 * Misc constant state packets
381 */
382
/* PIPE_CONTROL emission is currently disabled: the original code returned
 * unconditionally before building the packet, which left the rest of the
 * function unreachable and `pc` flagged as an unused variable.  Make the
 * no-op explicit, and keep the disabled emission under #if 0 so it can be
 * re-enabled when PIPE_CONTROL is wanted again.
 */
static void upload_pipe_control(struct brw_context *brw)
{
   (void) brw;

#if 0
   struct brw_pipe_control pc;

   memset(&pc, 0, sizeof(pc));

   pc.header.opcode = CMD_PIPE_CONTROL;
   pc.header.length = sizeof(pc)/4 - 2;
   pc.header.post_sync_operation = PIPE_CONTROL_NOWRITE;

   /* Flush the instruction/state cache when this fires (BRW_NEW_CONTEXT). */
   pc.header.instruction_state_cache_flush_enable = 1;

   pc.bits1.dest_addr_type = PIPE_CONTROL_GTTWRITE_GLOBAL;

   BRW_BATCH_STRUCT(brw, &pc);
#endif
}
401
/* Scheduled once per context creation (BRW_NEW_CONTEXT); the update
 * function is currently a no-op (see upload_pipe_control).
 */
const struct brw_tracked_state brw_pipe_control = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .update = upload_pipe_control
};
410
411
412 /***********************************************************************
413 * Misc invarient state packets
414 */
415
/* Emit the packets that are set once per context (BRW_NEW_CONTEXT) and
 * never change afterwards: pipeline select, depth offset clamp, system
 * instruction pointer, and VF statistics enable.
 */
static void upload_invarient_state( struct brw_context *brw )
{
   {
      /* 0x61040000 Pipeline Select */
      /* PipelineSelect : 0 -- presumably selects the 3D pipeline; confirm
       * against the hardware documentation.
       */
      struct brw_pipeline_select ps;

      memset(&ps, 0, sizeof(ps));
      ps.header.opcode = CMD_PIPELINE_SELECT;
      ps.header.pipeline_select = 0;
      BRW_BATCH_STRUCT(brw, &ps);
   }

   {
      struct brw_global_depth_offset_clamp gdo;
      memset(&gdo, 0, sizeof(gdo));

      /* Disable depth offset clamping.
       */
      gdo.header.opcode = CMD_GLOBAL_DEPTH_OFFSET_CLAMP;
      gdo.header.length = sizeof(gdo)/4 - 2;
      gdo.depth_offset_clamp = 0.0;

      BRW_BATCH_STRUCT(brw, &gdo);
   }


   /* 0x61020000 State Instruction Pointer */
   {
      /* Point the system instruction pointer at offset zero. */
      struct brw_system_instruction_pointer sip;
      memset(&sip, 0, sizeof(sip));

      sip.header.opcode = CMD_STATE_INSN_POINTER;
      sip.header.length = 0;
      sip.bits0.pad = 0;
      sip.bits0.system_instruction_pointer = 0;
      BRW_BATCH_STRUCT(brw, &sip);
   }


   {
      /* Enable vertex-fetch statistics counters only when stats debugging
       * is requested.
       */
      struct brw_vf_statistics vfs;
      memset(&vfs, 0, sizeof(vfs));

      vfs.opcode = CMD_VF_STATISTICS;
      if (INTEL_DEBUG & DEBUG_STATS)
	 vfs.statistics_enable = 1;

      BRW_BATCH_STRUCT(brw, &vfs);
   }
}
467
/* Invariant state is emitted once per context creation. */
const struct brw_tracked_state brw_invarient_state = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .update = upload_invarient_state
};
476
477 /**
478 * Define the base addresses which some state is referenced from.
479 *
480 * This allows us to avoid having to emit relocations in many places for
481 * cached state, and instead emit pointers inside of large, mostly-static
482 * state pools. This comes at the expense of memory, and more expensive cache
483 * misses.
484 */
static void upload_state_base_address( struct brw_context *brw )
{
   struct intel_context *intel = &brw->intel;

   /* Output the structure (brw_state_base_address) directly to the
    * batchbuffer, so we can emit relocations inline.
    */
   BEGIN_BATCH(6, INTEL_BATCH_NO_CLIPRECTS);
   OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
   /* The trailing 1 in each relocation/dword below is presumably the
    * per-dword modify-enable bit -- confirm against the hardware docs.
    */
   OUT_RELOC(brw->pool[BRW_GS_POOL].buffer,
	     DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
	     1); /* General state base address */
   OUT_RELOC(brw->pool[BRW_SS_POOL].buffer,
	     DRM_BO_FLAG_MEM_TT | DRM_BO_FLAG_READ,
	     1); /* Surface state base address */
   OUT_BATCH(1); /* Indirect object base address */
   OUT_BATCH(1); /* General state upper bound */
   OUT_BATCH(1); /* Indirect object upper bound */
   ADVANCE_BATCH();
}
505
506
/* Emitted on every validation (always_update) so the pool-buffer
 * relocations land in each new batch buffer.
 */
const struct brw_tracked_state brw_state_base_address = {
   .always_update = GL_TRUE,
   .update = upload_state_base_address
};