i965: don't emit state when dri_bufmgr_check_aperture_space fails.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_misc_state.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */


#include "intel_batchbuffer.h"
#include "intel_regions.h"

#include "brw_context.h"
#include "brw_state.h"
#include "brw_defines.h"


/***********************************************************************
 * Blend color
 */

static void upload_blend_constant_color(struct brw_context *brw)
{
   struct brw_blend_constant_color bcc;

   memset(&bcc, 0, sizeof(bcc));
   bcc.header.opcode = CMD_BLEND_CONSTANT_COLOR;
   bcc.header.length = sizeof(bcc)/4-2;
   bcc.blend_constant_color[0] = brw->attribs.Color->BlendColor[0];
   bcc.blend_constant_color[1] = brw->attribs.Color->BlendColor[1];
   bcc.blend_constant_color[2] = brw->attribs.Color->BlendColor[2];
   bcc.blend_constant_color[3] = brw->attribs.Color->BlendColor[3];

   BRW_CACHED_BATCH_STRUCT(brw, &bcc);
}

const struct brw_tracked_state brw_blend_constant_color = {
   .dirty = {
      .mesa = _NEW_COLOR,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_blend_constant_color
};
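
/* Atoms like brw_blend_constant_color above are collected in a table in
 * brw_state_upload.c; an atom's emit() hook runs whenever its dirty bits
 * intersect the currently-dirty state.  Roughly (a sketch of the dispatch
 * loop, not the exact upstream code):
 *
 *    for (i = 0; i < num_atoms; i++) {
 *       const struct brw_tracked_state *atom = atoms[i];
 *
 *       if ((atom->dirty.mesa & state->mesa) ||
 *           (atom->dirty.brw & state->brw) ||
 *           (atom->dirty.cache & state->cache))
 *          atom->emit(brw);
 *    }
 */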

/**
 * Upload the binding table pointers, which point to each stage's array of
 * surface state pointers.
 *
 * The binding table pointers are relative to the surface state base address,
 * which is 0.
 */
static void upload_binding_table_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   dri_bo *aper_array[] = {
      intel->batch->buf,
      brw->wm.bind_bo,
   };

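   /* If the batch plus the buffers this packet references might not fit in
    * the GTT aperture, flush now and emit nothing; the flush dirties
    * BRW_NEW_BATCH, so this atom is re-run against the fresh batch.
    */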
   if (dri_bufmgr_check_aperture_space(aper_array, ARRAY_SIZE(aper_array))) {
      intel_batchbuffer_flush(intel->batch);
      return;
   }

   BEGIN_BATCH(6, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_BINDING_TABLE_PTRS << 16 | (6 - 2));
   OUT_BATCH(0); /* vs */
   OUT_BATCH(0); /* gs */
   OUT_BATCH(0); /* clip */
   OUT_BATCH(0); /* sf */
   OUT_RELOC(brw->wm.bind_bo,
             I915_GEM_DOMAIN_SAMPLER, 0,
             0);
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_binding_table_pointers = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_BATCH,
      .cache = CACHE_NEW_SURF_BIND,
   },
   .emit = upload_binding_table_pointers,
};


/**
 * Upload pointers to the per-stage state.
 *
 * The state pointers in this packet are all relative to the general state
 * base address set by CMD_STATE_BASE_ADDRESS, which is 0.
 */
static void upload_pipelined_state_pointers(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   BEGIN_BATCH(7, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_PIPELINED_STATE_POINTERS << 16 | (7 - 2));
   OUT_RELOC(brw->vs.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
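   /* Bit 0 of the GS and clip state pointers is the unit enable flag,
    * hence the relocation delta of 1 when the stage is in use.
    */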
   if (brw->gs.prog_active)
      OUT_RELOC(brw->gs.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
   else
      OUT_BATCH(0);
   if (!brw->metaops.active)
      OUT_RELOC(brw->clip.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 1);
   else
      OUT_BATCH(0);
   OUT_RELOC(brw->sf.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   OUT_RELOC(brw->wm.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   OUT_RELOC(brw->cc.state_bo, I915_GEM_DOMAIN_INSTRUCTION, 0, 0);
   ADVANCE_BATCH();

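   /* Let interested atoms know that fresh pipelined state pointers
    * have been emitted.
    */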
   brw->state.dirty.brw |= BRW_NEW_PSP;
}

static void upload_psp_urb_cbs(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   dri_bo *aper_array[] = {
      intel->batch->buf,
      brw->vs.state_bo,
      brw->gs.state_bo,
      brw->clip.state_bo,
      brw->sf.state_bo,
      brw->wm.state_bo,
      brw->cc.state_bo,
   };

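   /* Same aperture-space dance as upload_binding_table_pointers(): flush
    * instead of emitting if the referenced state buffers might not fit.
    */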
   if (dri_bufmgr_check_aperture_space(aper_array, ARRAY_SIZE(aper_array))) {
      intel_batchbuffer_flush(intel->batch);
      return;
   }

   upload_pipelined_state_pointers(brw);
   brw_upload_urb_fence(brw);
   brw_upload_constant_buffer_state(brw);
}

const struct brw_tracked_state brw_psp_urb_cbs = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_URB_FENCE | BRW_NEW_METAOPS | BRW_NEW_BATCH,
      .cache = (CACHE_NEW_VS_UNIT |
                CACHE_NEW_GS_UNIT |
                CACHE_NEW_GS_PROG |
                CACHE_NEW_CLIP_UNIT |
                CACHE_NEW_SF_UNIT |
                CACHE_NEW_WM_UNIT |
                CACHE_NEW_CC_UNIT)
   },
   .emit = upload_psp_urb_cbs,
};

static void emit_depthbuffer(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct intel_region *region = brw->state.depth_region;
   unsigned int len = (BRW_IS_GM45(brw) || BRW_IS_G4X(brw)) ?
      sizeof(struct brw_depthbuffer_gm45_g4x) / 4 :
      sizeof(struct brw_depthbuffer) / 4;

   if (region == NULL) {
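      /* No depth region is bound: point the hardware at a null depth
       * buffer so depth and stencil writes have no effect.
       */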
      BEGIN_BATCH(len, IGNORE_CLIPRECTS);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH((BRW_DEPTHFORMAT_D32_FLOAT << 18) |
                (BRW_SURFACE_NULL << 29));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);

      if (BRW_IS_GM45(brw) || BRW_IS_G4X(brw))
         OUT_BATCH(0);

      ADVANCE_BATCH();
   } else {
      unsigned int format;
      dri_bo *aper_array[] = {
         intel->batch->buf,
         region->buffer
      };

      switch (region->cpp) {
      case 2:
         format = BRW_DEPTHFORMAT_D16_UNORM;
         break;
      case 4:
         if (intel->depth_buffer_is_float)
            format = BRW_DEPTHFORMAT_D32_FLOAT;
         else
            format = BRW_DEPTHFORMAT_D24_UNORM_S8_UINT;
         break;
      default:
         assert(0);
         return;
      }

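      /* Depth regions can be large; as above, flush and let BRW_NEW_BATCH
       * re-run this atom if everything might not fit in the aperture.
       */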
      if (dri_bufmgr_check_aperture_space(aper_array, ARRAY_SIZE(aper_array))) {
         intel_batchbuffer_flush(intel->batch);
         return;
      }

      BEGIN_BATCH(len, IGNORE_CLIPRECTS);
      OUT_BATCH(CMD_DEPTH_BUFFER << 16 | (len - 2));
      OUT_BATCH(((region->pitch * region->cpp) - 1) |
                (format << 18) |
                (BRW_TILEWALK_YMAJOR << 26) |
                ((region->tiling != I915_TILING_NONE) << 27) |
                (BRW_SURFACE_2D << 29));
      OUT_RELOC(region->buffer,
                I915_GEM_DOMAIN_RENDER, I915_GEM_DOMAIN_RENDER,
                0);
      OUT_BATCH((BRW_SURFACE_MIPMAPLAYOUT_BELOW << 1) |
                ((region->pitch - 1) << 6) |
                ((region->height - 1) << 19));
      OUT_BATCH(0);

      if (BRW_IS_GM45(brw) || BRW_IS_G4X(brw))
         OUT_BATCH(0);

      ADVANCE_BATCH();
   }
}

const struct brw_tracked_state brw_depthbuffer = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_DEPTH_BUFFER | BRW_NEW_BATCH,
      .cache = 0,
   },
   .emit = emit_depthbuffer,
};


/***********************************************************************
 * Polygon stipple packet
 */

static void upload_polygon_stipple(struct brw_context *brw)
{
   struct brw_polygon_stipple bps;
   GLuint i;

   memset(&bps, 0, sizeof(bps));
   bps.header.opcode = CMD_POLY_STIPPLE_PATTERN;
   bps.header.length = sizeof(bps)/4-2;

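   /* GL specifies the stipple pattern bottom-to-top, but the hardware
    * reads it top-to-bottom, so flip the rows here.
    */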
   for (i = 0; i < 32; i++)
      bps.stipple[i] = brw->attribs.PolygonStipple[31 - i]; /* invert */

   BRW_CACHED_BATCH_STRUCT(brw, &bps);
}

const struct brw_tracked_state brw_polygon_stipple = {
   .dirty = {
      .mesa = _NEW_POLYGONSTIPPLE,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_polygon_stipple
};


/***********************************************************************
 * Polygon stipple offset packet
 */

static void upload_polygon_stipple_offset(struct brw_context *brw)
{
   __DRIdrawablePrivate *dPriv = brw->intel.driDrawable;
   struct brw_polygon_stipple_offset bpso;

   memset(&bpso, 0, sizeof(bpso));
   bpso.header.opcode = CMD_POLY_STIPPLE_OFFSET;
   bpso.header.length = sizeof(bpso)/4-2;

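   /* The hardware anchors the 32x32 stipple pattern to the screen origin,
    * while GL anchors it to the window's lower-left corner, so offset the
    * pattern by the drawable's position mod 32.  Y uses the window's
    * bottom edge (y + h) because GL's window origin is the lower-left.
    */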
   bpso.bits0.x_offset = (32 - (dPriv->x & 31)) & 31;
   bpso.bits0.y_offset = (32 - ((dPriv->y + dPriv->h) & 31)) & 31;

   BRW_CACHED_BATCH_STRUCT(brw, &bpso);
}

#define _NEW_WINDOW_POS 0x40000000

const struct brw_tracked_state brw_polygon_stipple_offset = {
   .dirty = {
      .mesa = _NEW_WINDOW_POS,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_polygon_stipple_offset
};

/***********************************************************************
 * AA Line parameters
 */
static void upload_aa_line_parameters(struct brw_context *brw)
{
   struct brw_aa_line_parameters balp;

   if (!(BRW_IS_GM45(brw) || BRW_IS_G4X(brw)))
      return;

   /* use legacy aa line coverage computation */
   memset(&balp, 0, sizeof(balp));
   balp.header.opcode = CMD_AA_LINE_PARAMETERS;
   balp.header.length = sizeof(balp) / 4 - 2;

   BRW_CACHED_BATCH_STRUCT(brw, &balp);
}

const struct brw_tracked_state brw_aa_line_parameters = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_aa_line_parameters
};

/***********************************************************************
 * Line stipple packet
 */

static void upload_line_stipple(struct brw_context *brw)
{
   struct brw_line_stipple bls;
   GLfloat tmp;
   GLint tmpi;

   memset(&bls, 0, sizeof(bls));
   bls.header.opcode = CMD_LINE_STIPPLE_PATTERN;
   bls.header.length = sizeof(bls)/4 - 2;

   bls.bits0.pattern = brw->attribs.Line->StipplePattern;
   bls.bits1.repeat_count = brw->attribs.Line->StippleFactor;

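   /* The hardware also wants the reciprocal of the repeat count, in 1.13
    * fixed-point form -- hence the 1 << 13 scale factor below.
    */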
   tmp = 1.0 / (GLfloat) brw->attribs.Line->StippleFactor;
   tmpi = tmp * (1<<13);

   bls.bits1.inverse_repeat_count = tmpi;

   BRW_CACHED_BATCH_STRUCT(brw, &bls);
}

const struct brw_tracked_state brw_line_stipple = {
   .dirty = {
      .mesa = _NEW_LINE,
      .brw = 0,
      .cache = 0
   },
   .emit = upload_line_stipple
};


/***********************************************************************
 * Misc invariant state packets
 */

static void upload_invarient_state(struct brw_context *brw)
{
   {
      /* 0x61040000  Pipeline Select */
      /*     PipelineSelect            : 0 */
      struct brw_pipeline_select ps;

      memset(&ps, 0, sizeof(ps));
      ps.header.opcode = CMD_PIPELINE_SELECT(brw);
      ps.header.pipeline_select = 0;
      BRW_BATCH_STRUCT(brw, &ps);
   }

   {
      struct brw_global_depth_offset_clamp gdo;
      memset(&gdo, 0, sizeof(gdo));

      /* Disable depth offset clamping. */
      gdo.header.opcode = CMD_GLOBAL_DEPTH_OFFSET_CLAMP;
      gdo.header.length = sizeof(gdo)/4 - 2;
      gdo.depth_offset_clamp = 0.0;

      BRW_BATCH_STRUCT(brw, &gdo);
   }

   /* 0x61020000  State Instruction Pointer */
   {
      struct brw_system_instruction_pointer sip;
      memset(&sip, 0, sizeof(sip));

      sip.header.opcode = CMD_STATE_INSN_POINTER;
      sip.header.length = 0;
      sip.bits0.pad = 0;
      sip.bits0.system_instruction_pointer = 0;
      BRW_BATCH_STRUCT(brw, &sip);
   }

   {
      struct brw_vf_statistics vfs;
      memset(&vfs, 0, sizeof(vfs));

      vfs.opcode = CMD_VF_STATISTICS(brw);
      if (INTEL_DEBUG & DEBUG_STATS)
         vfs.statistics_enable = 1;

      BRW_BATCH_STRUCT(brw, &vfs);
   }
}

const struct brw_tracked_state brw_invarient_state = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0
   },
   .emit = upload_invarient_state
};

/**
 * Define the base addresses which some state is referenced from.
 *
 * This allows us to avoid having to emit relocations in many places for
 * cached state, and instead emit pointers inside of large, mostly-static
 * state pools.  This comes at the expense of memory and additional cache
 * misses.
 */
static void upload_state_base_address(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;

   /* Output the structure (brw_state_base_address) directly to the
    * batchbuffer, so we can emit relocations inline.
    */
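   /* Each address DWord below is zero with bit 0 (the "modify enable" bit)
    * set: every base address and upper bound is programmed to 0, so state
    * offsets are effectively absolute graphics addresses.
    */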
   BEGIN_BATCH(6, IGNORE_CLIPRECTS);
   OUT_BATCH(CMD_STATE_BASE_ADDRESS << 16 | (6 - 2));
   OUT_BATCH(1); /* General state base address */
   OUT_BATCH(1); /* Surface state base address */
   OUT_BATCH(1); /* Indirect object base address */
   OUT_BATCH(1); /* General state upper bound */
   OUT_BATCH(1); /* Indirect object upper bound */
   ADVANCE_BATCH();
}

const struct brw_tracked_state brw_state_base_address = {
   .dirty = {
      .mesa = 0,
      .brw = BRW_NEW_CONTEXT,
      .cache = 0,
   },
   .emit = upload_state_base_address
};