i965: Set up sandybridge depthbuffer.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_state_upload.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */


#include "brw_context.h"
#include "brw_state.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_chipset.h"

/* This is used to initialize brw->state.atoms[].  We could use this
 * list directly except for a single atom, brw_constant_buffer, whose
 * .dirty value changes according to the parameters of the current
 * fragment and vertex programs, and so cannot be a static value.
 */
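/* Each atom pairs a set of dirty flags with optional prepare/emit
 * hooks.  A minimal sketch of the shape implied by the usage in this
 * file (see the driver headers for the real definition):
 *
 *    struct brw_tracked_state {
 *       struct brw_state_flags dirty;
 *       void (*prepare)(struct brw_context *brw);
 *       void (*emit)(struct brw_context *brw);
 *    };
 *
 * brw_validate_state() runs prepare for every atom whose .dirty flags
 * intersect the accumulated dirty state, and brw_upload_state() then
 * runs the matching emit hooks.
 */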
static const struct brw_tracked_state *gen4_atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_vp,
   &brw_cc_unit,

   &brw_vs_surfaces,          /* must do before unit */
   &brw_wm_constant_surface,  /* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,          /* must do before samplers and unit */
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,              /* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   &brw_constant_buffer
};

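/* Gen6 (Sandybridge) state is still being brought up: only the
 * depth-buffer atom is wired in so far, and the remaining atoms stay
 * behind #if 0 until their Gen6 equivalents are implemented.
 */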
static const struct brw_tracked_state *gen6_atoms[] =
{
   &brw_check_fallback,

#if 0
   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_vp,
   &brw_cc_unit,

   &brw_vs_surfaces,          /* must do before unit */
   &brw_wm_constant_surface,  /* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,          /* must do before samplers and unit */
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,              /* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,
#endif

   &brw_depthbuffer,

#if 0
   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   &brw_constant_buffer
#endif
};

void brw_init_state( struct brw_context *brw )
{
   brw_init_caches(brw);
}


void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
   brw_destroy_batch_cache(brw);
}

/***********************************************************************
 */

static GLboolean check_state( const struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   return ((a->mesa & b->mesa) ||
           (a->brw & b->brw) ||
           (a->cache & b->cache));
}

static void accumulate_state( struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   a->mesa |= b->mesa;
   a->brw |= b->brw;
   a->cache |= b->cache;
}


static void xor_states( struct brw_state_flags *result,
                        const struct brw_state_flags *a,
                        const struct brw_state_flags *b )
{
   result->mesa = a->mesa ^ b->mesa;
   result->brw = a->brw ^ b->brw;
   result->cache = a->cache ^ b->cache;
}
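
/* These helpers implement the flag algebra used below: check_state()
 * tests whether two flag sets intersect (i.e. whether an atom cares
 * about any state that is currently dirty), accumulate_state() ORs two
 * sets together, and xor_states() yields the flags that differ between
 * two snapshots -- the INTEL_DEBUG path in brw_upload_state() uses it
 * to spot flags raised by an atom's own emit hook.
 */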

void
brw_clear_validated_bos(struct brw_context *brw)
{
   int i;

   /* Clear the last round of validated bos */
   for (i = 0; i < brw->state.validated_bo_count; i++) {
      dri_bo_unreference(brw->state.validated_bos[i]);
      brw->state.validated_bos[i] = NULL;
   }
   brw->state.validated_bo_count = 0;
}

struct dirty_bit_map {
   uint32_t bit;
   const char *name;
   uint32_t count;
};

#define DEFINE_BIT(name) {name, #name, 0}
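/* For example, DEFINE_BIT(_NEW_DEPTH) expands to
 *    {_NEW_DEPTH, "_NEW_DEPTH", 0}
 * so each table below maps a dirty bit to a printable name plus a hit
 * count, terminated by a zeroed sentinel entry.
 */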

static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR_MATRIX),
   DEFINE_BIT(_NEW_ACCUM),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_PACKUNPACK),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   {0, 0, 0}
};

static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_WM_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_DEPTH_BUFFER),
   {0, 0, 0}
};

static struct dirty_bit_map cache_bits[] = {
   DEFINE_BIT(CACHE_NEW_CC_VP),
   DEFINE_BIT(CACHE_NEW_CC_UNIT),
   DEFINE_BIT(CACHE_NEW_WM_PROG),
   DEFINE_BIT(CACHE_NEW_SAMPLER_DEFAULT_COLOR),
   DEFINE_BIT(CACHE_NEW_SAMPLER),
   DEFINE_BIT(CACHE_NEW_WM_UNIT),
   DEFINE_BIT(CACHE_NEW_SF_PROG),
   DEFINE_BIT(CACHE_NEW_SF_VP),
   DEFINE_BIT(CACHE_NEW_SF_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_PROG),
   DEFINE_BIT(CACHE_NEW_GS_UNIT),
   DEFINE_BIT(CACHE_NEW_GS_PROG),
   DEFINE_BIT(CACHE_NEW_CLIP_VP),
   DEFINE_BIT(CACHE_NEW_CLIP_UNIT),
   DEFINE_BIT(CACHE_NEW_CLIP_PROG),
   DEFINE_BIT(CACHE_NEW_SURFACE),
   DEFINE_BIT(CACHE_NEW_SURF_BIND),
   {0, 0, 0}
};


static void
brw_update_dirty_count(struct dirty_bit_map *bit_map, uint32_t bits)
{
   int i;

   for (i = 0; i < 32; i++) {
      if (bit_map[i].bit == 0)
	 return;

      if (bit_map[i].bit & bits)
	 bit_map[i].count++;
   }
}

static void
brw_print_dirty_count(struct dirty_bit_map *bit_map)
{
   int i;

   for (i = 0; i < 32; i++) {
      if (bit_map[i].bit == 0)
	 return;

      fprintf(stderr, "0x%08x: %12d (%s)\n",
	      bit_map[i].bit, bit_map[i].count, bit_map[i].name);
   }
}

/***********************************************************************
 * Emit all state:
 */
void brw_validate_state( struct brw_context *brw )
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   brw_clear_validated_bos(brw);

   state->mesa |= brw->intel.NewGLState;
   brw->intel.NewGLState = 0;

   brw_add_validated_bo(brw, intel->batch->buf);

   if (IS_GEN6(intel->intelScreen->deviceID)) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   if (brw->emit_state_always) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   if (brw->fragment_program != ctx->FragmentProgram._Current) {
      brw->fragment_program = ctx->FragmentProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
   }

   if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
   }

   if (state->mesa == 0 &&
       state->cache == 0 &&
       state->brw == 0)
      return;

   if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
      brw_clear_batch_cache(brw);

   brw->intel.Fallback = GL_FALSE; /* boolean, not bitfield */

   /* do prepare stage for all atoms */
   for (i = 0; i < num_atoms; i++) {
      const struct brw_tracked_state *atom = atoms[i];

      if (brw->intel.Fallback)
	 break;

      if (check_state(state, &atom->dirty)) {
	 if (atom->prepare) {
	    atom->prepare(brw);
	 }
      }
   }

   intel_check_front_buffer_rendering(intel);

   /* Make sure that the textures which are referenced by the current
    * brw fragment program are actually present/valid.
    * If this fails, we can experience GPU lock-ups.
    */
   {
      const struct brw_fragment_program *fp;
      fp = brw_fragment_program_const(brw->fragment_program);
      if (fp) {
	 assert((fp->tex_units_used & ctx->Texture._EnabledUnits)
		== fp->tex_units_used);
      }
   }
}

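/* Second phase: emit.  brw_validate_state() above ran the prepare hook
 * of every atom whose dirty flags intersected the current state; here
 * the matching emit hooks actually write the state into the batch, and
 * the dirty flags are cleared afterwards unless we have fallen back to
 * software rasterization.
 */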
void brw_upload_state(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   int i;
   static int dirty_count = 0;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   if (IS_GEN6(intel->intelScreen->deviceID)) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   brw_clear_validated_bos(brw);

   if (INTEL_DEBUG) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
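      /* Concretely: if some atom's emit raised a flag (say BRW_NEW_PSP,
       * chosen purely for illustration) after the atom that watches that
       * flag had already run, "examined & generated" would be nonzero
       * and the assert below would fire, exposing the ordering bug.
       */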
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];
	 struct brw_state_flags generated;

	 assert(atom->dirty.mesa ||
		atom->dirty.brw ||
		atom->dirty.cache);

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit(brw);
	    }
	 }

	 accumulate_state(&examined, &atom->dirty);

	 /* generated = (prev ^ state)
	  * if (examined & generated)
	  *     fail;
	  */
	 xor_states(&generated, &prev, state);
	 assert(!check_state(&examined, &generated));
	 prev = *state;
      }
   }
   else {
      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit(brw);
	    }
	 }
      }
   }

   if (INTEL_DEBUG & DEBUG_STATE) {
      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      brw_update_dirty_count(cache_bits, state->cache);
      if (dirty_count++ % 1000 == 0) {
	 brw_print_dirty_count(mesa_bits);
	 brw_print_dirty_count(brw_bits);
	 brw_print_dirty_count(cache_bits);
	 fprintf(stderr, "\n");
      }
   }

   if (!brw->intel.Fallback)
      memset(state, 0, sizeof(*state));
}