i965: Remove brw->attribs now that we can just always look in the GLcontext.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_misc_state.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keith@tungstengraphics.com>
  */


#include "intel_batchbuffer.h"
#include "intel_regions.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"


/***********************************************************************
 * Blend color
 */

static void upload_blend_constant_color(struct brw_context *brw)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct brw_blend_constant_color bcc;

   memset(&bcc, 0, sizeof(bcc));
   bcc.header.opcode = CMD_BLEND_CONSTANT_COLOR;
   bcc.header.length = sizeof(bcc)/4 - 2;
   bcc.blend_constant_color[0] = ctx->Color.BlendColor[0];
   bcc.blend_constant_color[1] = ctx->Color.BlendColor[1];
   bcc.blend_constant_color[2] = ctx->Color.BlendColor[2];
   bcc.blend_constant_color[3] = ctx->Color.BlendColor[3];

   BRW_CACHED_BATCH_STRUCT(brw, &bcc);
}

const struct brw_tracked_state brw_blend_constant_color = {
   .dirty = {
      .mesa = _NEW_COLOR,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_blend_constant_color
};

/* Constant single cliprect for framebuffer object or DRI2 drawing */
static void upload_drawing_rect(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   GLcontext *ctx = &intel->ctx;

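   /* Without a constant cliprect (DRI1 window-system rendering), the
    * drawing rectangle is instead set up per cliprect when the batch is
    * executed, so there is nothing to emit here.
    */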
   if (!intel->constant_cliprect)
      return;

   BEGIN_BATCH(4, NO_LOOP_CLIPRECTS);
   OUT_BATCH(_3DSTATE_DRAWRECT_INFO_I965);
   OUT_BATCH(0); /* xmin, ymin */
   OUT_BATCH(((ctx->DrawBuffer->Width - 1) & 0xffff) |
             ((ctx->DrawBuffer->Height - 1) << 16)); /* xmax, ymax */
   OUT_BATCH(0); /* xorigin, yorigin */
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_drawing_rect = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_drawing_rect
};

static void prepare_binding_table_pointers(struct brw_context *brw)
{
   brw_add_validated_bo(brw, brw->wm.bind_bo);
}

/**
 * Upload the binding table pointers, which point to each stage's array of
 * surface state pointers.
 *
 * The binding table pointers are relative to the surface state base address,
 * which is 0.  Only the WM stage has a binding table at this point, so the
 * other stages' pointers are left at zero.
 */
static void upload_binding_table_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(6, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_BINDING_TABLE_PTRS << 16 | (6 - 2));
   OUT_BATCH(0); /* vs */
   OUT_BATCH(0); /* gs */
   OUT_BATCH(0); /* clip */
   OUT_BATCH(0); /* sf */
   OUT_RELOC(brw->wm.bind_bo,
             I915_GEM_DOMAIN_SAMPLER, 0,
             0);
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_binding_table_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH,
      .cache = CACHE_NEW_SURF_BIND,
   },
   .prepare = prepare_binding_table_pointers,
   .emit = upload_binding_table_pointers,
};

/**
 * Upload pointers to the per-stage state.
 *
 * The state pointers in this packet are all relative to the general state
 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
 */
static void upload_pipelined_state_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(7, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_PIPELINED_STATE_POINTERS << 16 | (7 - 2));
   OUT_RELOC(brw->vs.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   if (brw->gs.prog_active)
      OUT_RELOC(brw->gs.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1); /* bit 0: GS enable */
   else
      OUT_BATCH(0);
   OUT_RELOC(brw->clip.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1); /* bit 0: clip enable */
   OUT_RELOC(brw->sf.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   OUT_RELOC(brw->wm.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   OUT_RELOC(brw->cc.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   ADVANCE_BATCH();

   brw->state.dirty.brw |= BRW_NEW_PSP;
}

static void prepare_psp_urb_cbs(struct brw_context *brw)
{
   brw_add_validated_bo(brw, brw->vs.state_bo);
   brw_add_validated_bo(brw, brw->gs.state_bo);
   brw_add_validated_bo(brw, brw->clip.state_bo);
   brw_add_validated_bo(brw, brw->sf.state_bo);
   brw_add_validated_bo(brw, brw->wm.state_bo);
   brw_add_validated_bo(brw, brw->cc.state_bo);
}

static void upload_psp_urb_cbs(struct brw_context *brw)
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_constant_buffer_state(brw);
}

const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_URB_FENCE | BRW_NEW_BATCH,
      .cache = (CACHE_NEW_VS_UNIT |
                CACHE_NEW_GS_UNIT |
                CACHE_NEW_GS_PROG |
                CACHE_NEW_CLIP_UNIT |
                CACHE_NEW_SF_UNIT |
                CACHE_NEW_WM_UNIT |
                CACHE_NEW_CC_UNIT)
   },
   .prepare = prepare_psp_urb_cbs,
   .emit = upload_psp_urb_cbs,
};

static void prepare_depthbuffer(struct brw_context *brw)
{
   struct intel_region *region = brw->state.depth_region;

   if (region != NULL)
      brw_add_validated_bo(brw, region->buffer);
}

static void emit_depthbuffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct intel_region *region = brw->state.depth_region;
   unsigned int len = BRW_IS_G4X(brw) ? 6 : 5;

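   /* With no depth region bound, program a null depth buffer rather than
    * leaving whatever was programmed before in place.
    */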
   if (region == NULL) {
      BEGIN_BATCH(len, IGNORE_CLIPRECTS);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
                (BRW_SURFACE_NULL << 29));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);

      if (BRW_IS_G4X(brw))
         OUT_BATCH(0);

      ADVANCE_BATCH();
   } else {
      unsigned int format;

      switch (region->cpp) {
      case 2:
         format = BRW_DEPTHFORMAT_D16_UNORM;
         break;
      case 4:
         if (intel->depth_buffer_is_float)
            format = BRW_DEPTHFORMAT_D32_FLOAT;
         else
            format = BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
         break;
      default:
         assert(0);
         return;
      }

      BEGIN_BATCH(len, IGNORE_CLIPRECTS);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH(((region->pitch * region->cpp) - 1) |
                (format << 18) |
                (BRW_TILEWALK_YMAJOR << 26) |
                ((region->tiling != I915_TILING_NONE) << 27) |
                (BRW_SURFACE_2D << 29));
      OUT_RELOC(region->buffer,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                0);
      OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1) |
                ((region->pitch - 1) << 6) |
                ((region->height - 1) << 19));
      OUT_BATCH(0);

      if (BRW_IS_G4X(brw))
         OUT_BATCH(0);

      ADVANCE_BATCH();
   }
}

const struct brw_tracked_state brw_depthbuffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_DEPTH_BUFFER | BRW_NEW_BATCH,
      .cache = 0,
   },
   .prepare = prepare_depthbuffer,
   .emit = emit_depthbuffer,
};


/***********************************************************************
 * Polygon stipple packet
 */

static void upload_polygon_stipple(struct brw_context *brw)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct brw_polygon_stipple bps;
   GLuint i;

   memset(&bps, 0, sizeof(bps));
   bps.header.opcode = CMD_POLY_STIPPLE_PATTERN;
   bps.header.length = sizeof(bps)/4 - 2;

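   /* GL specifies the polygon stipple pattern with row 0 at the bottom of
    * the window, while the hardware applies the pattern top-down in screen
    * coordinates, so flip the rows here.
    */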
   for (i = 0; i < 32; i++)
      bps.stipple[i] = ctx->PolygonStipple[31 - i]; /* invert */

   BRW_CACHED_BATCH_STRUCT(brw, &bps);
}

const struct brw_tracked_state brw_polygon_stipple = {
   .dirty = {
      .mesa = _NEW_POLYGONSTIPPLE,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_polygon_stipple
};


/***********************************************************************
 * Polygon stipple offset packet
 */

static void upload_polygon_stipple_offset(struct brw_context *brw)
{
   __DRIdrawablePrivate *dPriv = brw->intel.driDrawable;
   struct brw_polygon_stipple_offset bpso;

   memset(&bpso, 0, sizeof(bpso));
   bpso.header.opcode = CMD_POLY_STIPPLE_OFFSET;
   bpso.header.length = sizeof(bpso)/4 - 2;

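   /* The pattern is applied in screen coordinates, so offset it by the
    * drawable's position to keep it anchored to window coordinates.  The y
    * offset is taken from the drawable's bottom edge, since GL window
    * coordinates have their origin at the bottom left.
    */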
   bpso.bits0.x_offset = (32 - (dPriv->x & 31)) & 31;
   bpso.bits0.y_offset = (32 - ((dPriv->y + dPriv->h) & 31)) & 31;

   BRW_CACHED_BATCH_STRUCT(brw, &bpso);
}

#define _NEW_WINDOW_POS 0x40000000

const struct brw_tracked_state brw_polygon_stipple_offset = {
   .dirty = {
      .mesa = _NEW_WINDOW_POS,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_polygon_stipple_offset
};

/**********************************************************************
 * AA Line parameters
 */
static void upload_aa_line_parameters(struct brw_context *brw)
{
   struct brw_aa_line_parameters balp;

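   /* The AA line parameters packet is new on G4X; earlier chipsets don't
    * have it and always use the original coverage computation.
    */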
   if (!BRW_IS_G4X(brw))
      return;

   /* use legacy aa line coverage computation */
   memset(&balp, 0, sizeof(balp));
   balp.header.opcode = CMD_AA_LINE_PARAMETERS;
   balp.header.length = sizeof(balp) / 4 - 2;

   BRW_CACHED_BATCH_STRUCT(brw, &balp);
}

const struct brw_tracked_state brw_aa_line_parameters = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_aa_line_parameters
};

/***********************************************************************
 * Line stipple packet
 */

static void upload_line_stipple(struct brw_context *brw)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct brw_line_stipple bls;
   GLfloat tmp;
   GLint tmpi;

   memset(&bls, 0, sizeof(bls));
   bls.header.opcode = CMD_LINE_STIPPLE_PATTERN;
   bls.header.length = sizeof(bls)/4 - 2;

   bls.bits0.pattern = ctx->Line.StipplePattern;
   bls.bits1.repeat_count = ctx->Line.StippleFactor;

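   /* The hardware wants the inverse of the stipple repeat count as a
    * fixed-point value with 13 fractional bits.
    */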
   tmp = 1.0 / (GLfloat) ctx->Line.StippleFactor;
   tmpi = tmp * (1<<13);

   bls.bits1.inverse_repeat_count = tmpi;

   BRW_CACHED_BATCH_STRUCT(brw, &bls);
}

const struct brw_tracked_state brw_line_stipple = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_line_stipple
};


/***********************************************************************
 * Misc invariant state packets
 */

static void upload_invarient_state(struct brw_context *brw)
{
   {
      /* 0x61040000  Pipeline Select */
      /* PipelineSelect : 0 (3D pipeline, as opposed to media) */
      struct brw_pipeline_select ps;

      memset(&ps, 0, sizeof(ps));
      ps.header.opcode = CMD_PIPELINE_SELECT(brw);
      ps.header.pipeline_select = 0;
      BRW_BATCH_STRUCT(brw, &ps);
   }

   {
      struct brw_global_depth_offset_clamp gdo;
      memset(&gdo, 0, sizeof(gdo));

      /* Disable depth offset clamping.
       */
      gdo.header.opcode = CMD_GLOBAL_DEPTH_OFFSET_CLAMP;
      gdo.header.length = sizeof(gdo)/4 - 2;
      gdo.depth_offset_clamp = 0.0;

      BRW_BATCH_STRUCT(brw, &gdo);
   }

   /* 0x61020000  State Instruction Pointer */
   {
      struct brw_system_instruction_pointer sip;
      memset(&sip, 0, sizeof(sip));

      /* We don't use a system routine, so leave the system instruction
       * pointer at 0.
       */
      sip.header.opcode = CMD_STATE_INSN_POINTER;
      sip.header.length = 0;
      sip.bits0.pad = 0;
      sip.bits0.system_instruction_pointer = 0;
      BRW_BATCH_STRUCT(brw, &sip);
   }

   {
      struct brw_vf_statistics vfs;
      memset(&vfs, 0, sizeof(vfs));

      /* Vertex fetch statistics counters are only enabled when stats
       * debugging is requested.
       */
      vfs.opcode = CMD_VF_STATISTICS(brw);
      if (INTEL_DEBUG & DEBUG_STATS)
         vfs.statistics_enable = 1;

      BRW_BATCH_STRUCT(brw, &vfs);
   }
}

const struct brw_tracked_state brw_invarient_state = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_invarient_state
};

/**
 * Define the base addresses which some state is referenced from.
 *
 * This allows us to avoid having to emit relocations in many places for
 * cached state, and instead emit pointers inside of large, mostly-static
 * state pools.  This comes at the expense of memory, and more expensive cache
 * misses.
 */
static void upload_state_base_address(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   /* Output the structure (brw_state_base_address) directly to the
    * batchbuffer, so we can emit relocations inline.
    */
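   /* Bit 0 of each base address dword is its "modify enable" bit, so
    * writing 1 programs a base address of zero for that field.
    */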
   BEGIN_BATCH(6, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
   OUT_BATCH(1); /* General state base address */
   OUT_BATCH(1); /* Surface state base address */
   OUT_BATCH(1); /* Indirect object base address */
   OUT_BATCH(1); /* General state upper bound */
   OUT_BATCH(1); /* Indirect object upper bound */
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_state_base_address = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0,
   },
   .emit = upload_state_base_address
};