intel: Replace IS_G4X() across the driver with context structure usage.
src/mesa/drivers/dri/i965/brw_misc_state.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keith@tungstengraphics.com>
  */


#include "intel_batchbuffer.h"
#include "intel_regions.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"


/***********************************************************************
 * Blend color
 */

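/* Emit the CMD_BLEND_CONSTANT_COLOR packet from the current GL blend
 * color (glBlendColor).
 */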
static void upload_blend_constant_color(struct brw_context *brw)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct brw_blend_constant_color bcc;

   memset(&bcc, 0, sizeof(bcc));
   bcc.header.opcode = CMD_BLEND_CONSTANT_COLOR;
   bcc.header.length = sizeof(bcc)/4 - 2;
   bcc.blend_constant_color[0] = ctx->Color.BlendColor[0];
   bcc.blend_constant_color[1] = ctx->Color.BlendColor[1];
   bcc.blend_constant_color[2] = ctx->Color.BlendColor[2];
   bcc.blend_constant_color[3] = ctx->Color.BlendColor[3];

   BRW_CACHED_BATCH_STRUCT(brw, &bcc);
}


const struct brw_tracked_state brw_blend_constant_color = {
   .dirty = {
      .mesa = _NEW_COLOR,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_blend_constant_color
};

/* Constant single cliprect for framebuffer object or DRI2 drawing */
static void upload_drawing_rect(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   GLcontext *ctx = &intel->ctx;

   if (!intel->constant_cliprect)
      return;

   BEGIN_BATCH(4, NO_LOOP_CLIPRECTS);
   OUT_BATCH(_3DSTATE_DRAWRECT_INFO_I965);
   OUT_BATCH(0); /* xmin, ymin */
   OUT_BATCH(((ctx->DrawBuffer->Width - 1) & 0xffff) |
             ((ctx->DrawBuffer->Height - 1) << 16));
   OUT_BATCH(0);
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_drawing_rect = {
   .dirty = {
      .mesa = _NEW_BUFFERS,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_drawing_rect
};

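/* Mark the binding table BOs as referenced by this batch, so they are
 * validated (and aperture space is accounted for) before the packets
 * that point at them are emitted.
 */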
static void prepare_binding_table_pointers(struct brw_context *brw)
{
   brw_add_validated_bo(brw, brw->vs.bind_bo);
   brw_add_validated_bo(brw, brw->wm.bind_bo);
}

/**
 * Upload the binding table pointers, which point to each stage's array of
 * surface state pointers.
 *
 * The binding table pointers are relative to the surface state base address,
 * which is 0.
 */
static void upload_binding_table_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(6, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_BINDING_TABLE_PTRS << 16 | (6 - 2));
   if (brw->vs.bind_bo != NULL)
      OUT_RELOC(brw->vs.bind_bo, I915_GEM_DOMAIN_SAMPLER, 0, 0); /* vs */
   else
      OUT_BATCH(0);
   OUT_BATCH(0); /* gs */
   OUT_BATCH(0); /* clip */
   OUT_BATCH(0); /* sf */
   OUT_RELOC(brw->wm.bind_bo, I915_GEM_DOMAIN_SAMPLER, 0, 0); /* wm/ps */
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_binding_table_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH,
      .cache = CACHE_NEW_SURF_BIND,
   },
   .prepare = prepare_binding_table_pointers,
   .emit = upload_binding_table_pointers,
};


/**
 * Upload pointers to the per-stage state.
 *
 * The state pointers in this packet are all relative to the general state
 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
 */
static void upload_pipelined_state_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(7, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_PIPELINED_STATE_POINTERS << 16 | (7 - 2));
   OUT_RELOC(brw->vs.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   if (brw->gs.prog_active)
      OUT_RELOC(brw->gs.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
   else
      OUT_BATCH(0);
   OUT_RELOC(brw->clip.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
   OUT_RELOC(brw->sf.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   OUT_RELOC(brw->wm.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   OUT_RELOC(brw->cc.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   ADVANCE_BATCH();

   brw->state.dirty.brw |= BRW_NEW_PSP;
}

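/* The pipelined state pointers, URB fence, and constant buffer URB state
 * are emitted together: per the dirty bits below, they must be re-sent
 * whenever the URB layout changes or a new batchbuffer begins.
 */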
static void prepare_psp_urb_cbs(struct brw_context *brw)
{
   brw_add_validated_bo(brw, brw->vs.state_bo);
   brw_add_validated_bo(brw, brw->gs.state_bo);
   brw_add_validated_bo(brw, brw->clip.state_bo);
   brw_add_validated_bo(brw, brw->sf.state_bo);
   brw_add_validated_bo(brw, brw->wm.state_bo);
   brw_add_validated_bo(brw, brw->cc.state_bo);
}

static void upload_psp_urb_cbs(struct brw_context *brw)
{
   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_cs_urb_state(brw);
}

const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_URB_FENCE | BRW_NEW_BATCH,
      .cache = (CACHE_NEW_VS_UNIT |
                CACHE_NEW_GS_UNIT |
                CACHE_NEW_GS_PROG |
                CACHE_NEW_CLIP_UNIT |
                CACHE_NEW_SF_UNIT |
                CACHE_NEW_WM_UNIT |
                CACHE_NEW_CC_UNIT)
   },
   .prepare = prepare_psp_urb_cbs,
   .emit = upload_psp_urb_cbs,
};

static void prepare_depthbuffer(struct brw_context *brw)
{
   struct intel_region *region = brw->state.depth_region;

   if (region != NULL)
      brw_add_validated_bo(brw, region->buffer);
}

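/* Emit the CMD_DEPTH_BUFFER packet.  When no depth region is bound, a
 * null depth buffer is programmed instead.  G4X and Ironlake take a
 * 6-dword version of the packet; original 965 takes 5 dwords.
 */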
static void emit_depthbuffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct intel_region *region = brw->state.depth_region;
   unsigned int len = (intel->is_g4x || intel->is_ironlake) ? 6 : 5;

   if (region == NULL) {
      BEGIN_BATCH(len, IGNORE_CLIPRECTS);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
                (BRW_SURFACE_NULL << 29));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);

      if (intel->is_g4x || intel->is_ironlake)
         OUT_BATCH(0);

      ADVANCE_BATCH();
   } else {
      unsigned int format;

      switch (region->cpp) {
      case 2:
         format = BRW_DEPTHFORMAT_D16_UNORM;
         break;
      case 4:
         if (intel->depth_buffer_is_float)
            format = BRW_DEPTHFORMAT_D32_FLOAT;
         else
            format = BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
         break;
      default:
         assert(0);
         return;
      }

      assert(region->tiling != I915_TILING_X);

      BEGIN_BATCH(len, IGNORE_CLIPRECTS);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH(((region->pitch * region->cpp) - 1) |
                (format << 18) |
                (BRW_TILEWALK_YMAJOR << 26) |
                ((region->tiling != I915_TILING_NONE) << 27) |
                (BRW_SURFACE_2D << 29));
      OUT_RELOC(region->buffer,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                0);
      OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1) |
                ((region->pitch - 1) << 6) |
                ((region->height - 1) << 19));
      OUT_BATCH(0);

      if (intel->is_g4x || intel->is_ironlake)
         OUT_BATCH(0);

      ADVANCE_BATCH();
   }
}

const struct brw_tracked_state brw_depthbuffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_DEPTH_BUFFER | BRW_NEW_BATCH,
      .cache = 0,
   },
   .prepare = prepare_depthbuffer,
   .emit = emit_depthbuffer,
};


/***********************************************************************
 * Polygon stipple packet
 */

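/* Emit the CMD_POLY_STIPPLE_PATTERN packet carrying the 32x32 GL polygon
 * stipple pattern (glPolygonStipple), row-flipped when needed as described
 * below.
 */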
static void upload_polygon_stipple(struct brw_context *brw)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct brw_polygon_stipple bps;
   GLuint i;

   memset(&bps, 0, sizeof(bps));
   bps.header.opcode = CMD_POLY_STIPPLE_PATTERN;
   bps.header.length = sizeof(bps)/4 - 2;

   /* Polygon stipple is provided in OpenGL order, i.e. bottom
    * row first.  If we're rendering to a window (i.e. the
    * default frame buffer object, 0), then we need to invert
    * it to match our pixel layout.  But if we're rendering
    * to a FBO (i.e. any named frame buffer object), we *don't*
    * need to invert - we already match the layout.
    */
   if (ctx->DrawBuffer->Name == 0) {
      for (i = 0; i < 32; i++)
         bps.stipple[i] = ctx->PolygonStipple[31 - i]; /* invert */
   }
   else {
      for (i = 0; i < 32; i++)
         bps.stipple[i] = ctx->PolygonStipple[i]; /* don't invert */
   }

   BRW_CACHED_BATCH_STRUCT(brw, &bps);
}

const struct brw_tracked_state brw_polygon_stipple = {
   .dirty = {
      .mesa = _NEW_POLYGONSTIPPLE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_polygon_stipple
};


/***********************************************************************
 * Polygon stipple offset packet
 */

static void upload_polygon_stipple_offset(struct brw_context *brw)
{
   __DRIdrawablePrivate *dPriv = brw->intel.driDrawable;
   struct brw_polygon_stipple_offset bpso;

   memset(&bpso, 0, sizeof(bpso));
   bpso.header.opcode = CMD_POLY_STIPPLE_OFFSET;
   bpso.header.length = sizeof(bpso)/4 - 2;

   /* If we're drawing to a system window (ctx->DrawBuffer->Name == 0),
    * we have to invert the Y axis in order to match the OpenGL
    * pixel coordinate system, and our offset must be matched
    * to the window position.  If we're drawing to a FBO
    * (ctx->DrawBuffer->Name != 0), then our native pixel coordinate
    * system works just fine, and there's no window system to
    * worry about.
    */
   if (brw->intel.ctx.DrawBuffer->Name == 0) {
      bpso.bits0.x_offset = (32 - (dPriv->x & 31)) & 31;
      bpso.bits0.y_offset = (32 - ((dPriv->y + dPriv->h) & 31)) & 31;
   }
   else {
      bpso.bits0.y_offset = 0;
      bpso.bits0.x_offset = 0;
   }

   BRW_CACHED_BATCH_STRUCT(brw, &bpso);
}

#define _NEW_WINDOW_POS 0x40000000

const struct brw_tracked_state brw_polygon_stipple_offset = {
   .dirty = {
      .mesa = _NEW_WINDOW_POS,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_polygon_stipple_offset
};

/**********************************************************************
 * AA Line parameters
 */
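/* The AA line parameters packet is only available on G4X and newer parts;
 * original 965 hardware uses the legacy coverage computation, so we skip
 * the packet there.
 */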
static void upload_aa_line_parameters(struct brw_context *brw)
{
   struct brw_aa_line_parameters balp;

   if (BRW_IS_965(brw))
      return;

   /* use legacy aa line coverage computation */
   memset(&balp, 0, sizeof(balp));
   balp.header.opcode = CMD_AA_LINE_PARAMETERS;
   balp.header.length = sizeof(balp) / 4 - 2;

   BRW_CACHED_BATCH_STRUCT(brw, &balp);
}

const struct brw_tracked_state brw_aa_line_parameters = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_aa_line_parameters
};

/***********************************************************************
 * Line stipple packet
 */

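/* Emit the line stipple packet from the current GL line stipple state
 * (glLineStipple pattern and repeat factor).
 */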
static void upload_line_stipple(struct brw_context *brw)
{
   GLcontext *ctx = &brw->intel.ctx;
   struct brw_line_stipple bls;
   GLfloat tmp;
   GLint tmpi;

   memset(&bls, 0, sizeof(bls));
   bls.header.opcode = CMD_LINE_STIPPLE_PATTERN;
   bls.header.length = sizeof(bls)/4 - 2;

   bls.bits0.pattern = ctx->Line.StipplePattern;
   bls.bits1.repeat_count = ctx->Line.StippleFactor;

   /* The inverse repeat count is the reciprocal of the repeat factor,
    * expressed in fixed point with 13 fractional bits (hence the
    * 1 << 13 scale).
    */
   tmp = 1.0 / (GLfloat) ctx->Line.StippleFactor;
   tmpi = tmp * (1<<13);

   bls.bits1.inverse_repeat_count = tmpi;

   BRW_CACHED_BATCH_STRUCT(brw, &bls);
}

const struct brw_tracked_state brw_line_stipple = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_line_stipple
};


/***********************************************************************
 * Misc invariant state packets
 */

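/* Emit the packets that are set up once per context (BRW_NEW_CONTEXT)
 * and never change afterwards: pipeline select, global depth offset
 * clamp, system instruction pointer, and vertex fetch statistics.
 */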
static void upload_invarient_state(struct brw_context *brw)
{
   {
      /* 0x61040000  Pipeline Select */
      /*     PipelineSelect            : 0 */
      struct brw_pipeline_select ps;

      memset(&ps, 0, sizeof(ps));
      ps.header.opcode = brw->CMD_PIPELINE_SELECT;
      ps.header.pipeline_select = 0;
      BRW_BATCH_STRUCT(brw, &ps);
   }

   {
      struct brw_global_depth_offset_clamp gdo;
      memset(&gdo, 0, sizeof(gdo));

      /* Disable depth offset clamping.
       */
      gdo.header.opcode = CMD_GLOBAL_DEPTH_OFFSET_CLAMP;
      gdo.header.length = sizeof(gdo)/4 - 2;
      gdo.depth_offset_clamp = 0.0;

      BRW_BATCH_STRUCT(brw, &gdo);
   }


   /* 0x61020000  State Instruction Pointer */
   {
      struct brw_system_instruction_pointer sip;
      memset(&sip, 0, sizeof(sip));

      sip.header.opcode = CMD_STATE_INSN_POINTER;
      sip.header.length = 0;
      sip.bits0.pad = 0;
      sip.bits0.system_instruction_pointer = 0;
      BRW_BATCH_STRUCT(brw, &sip);
   }


   {
      struct brw_vf_statistics vfs;
      memset(&vfs, 0, sizeof(vfs));

      vfs.opcode = brw->CMD_VF_STATISTICS;
      if (INTEL_DEBUG & DEBUG_STATS)
         vfs.statistics_enable = 1;

      BRW_BATCH_STRUCT(brw, &vfs);
   }
}

const struct brw_tracked_state brw_invarient_state = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_invarient_state
};

/**
 * Define the base addresses which some state is referenced from.
 *
 * This allows us to avoid having to emit relocations in many places for
 * cached state, and instead emit pointers inside of large, mostly-static
 * state pools.  This comes at the expense of memory, and more expensive
 * cache misses.
 */
static void upload_state_base_address(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   /* Output the structure (brw_state_base_address) directly to the
    * batchbuffer, so we can emit relocations inline.
    */
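   /* Each OUT_BATCH(1) below programs a base address of zero: the low bit
    * of each address dword is the modify-enable bit, which must be set for
    * the new address to take effect.
    */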
   if (intel->is_ironlake) {
      BEGIN_BATCH(8, IGNORE_CLIPRECTS);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (8 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_BATCH(1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* Instruction base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      OUT_BATCH(1); /* Instruction access upper bound */
      ADVANCE_BATCH();
   } else {
      BEGIN_BATCH(6, IGNORE_CLIPRECTS);
      OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
      OUT_BATCH(1); /* General state base address */
      OUT_BATCH(1); /* Surface state base address */
      OUT_BATCH(1); /* Indirect object base address */
      OUT_BATCH(1); /* General state upper bound */
      OUT_BATCH(1); /* Indirect object upper bound */
      ADVANCE_BATCH();
   }
}

const struct brw_tracked_state brw_state_base_address = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0,
   },
   .emit = upload_state_base_address
};