i965: Untested Sandybridge SF setup.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_state_upload.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33
34 #include "brw_context.h"
35 #include "brw_state.h"
36 #include "intel_batchbuffer.h"
37 #include "intel_buffers.h"
38 #include "intel_chipset.h"
39
/* This is used to initialize brw->state.atoms[].  We could use this
 * list directly except for a single atom, brw_constant_buffer, which
 * has a .dirty value which changes according to the parameters of the
 * current fragment and vertex programs, and so cannot be a static
 * value.
 */
static const struct brw_tracked_state *gen4_atoms[] =
{
   /* NOTE: atoms are prepared/emitted in list order; the "must do
    * before" constraints noted below depend on that ordering.
    */
   &brw_check_fallback,

   /* Program compilation first: the unit/URB atoms below consume the
    * compiled program sizes.
    */
   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_vp,
   &brw_cc_unit,

   &brw_vs_surfaces,		/* must do before unit */
   &brw_wm_constant_surface,	/* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,		/* must do before samplers and unit */
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,		/* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   /* Must be last: its .dirty value is not static (see comment above). */
   &brw_constant_buffer
};
104
105 const struct brw_tracked_state *gen6_atoms[] =
106 {
107 &brw_check_fallback,
108
109 &brw_wm_input_sizes,
110 &brw_vs_prog,
111 &brw_gs_prog,
112 #if 0
113 &brw_wm_prog,
114
115 /* Once all the programs are done, we know how large urb entry
116 * sizes need to be and can decide if we need to change the urb
117 * layout.
118 */
119 &brw_curbe_offsets,
120 #endif
121 &gen6_clip_vp,
122 &gen6_sf_vp,
123 &gen6_cc_vp,
124 &gen6_viewport_state, /* must do after *_vp stages */
125
126 &gen6_urb,
127 &gen6_blend_state, /* must do before cc unit */
128 &gen6_color_calc_state, /* must do before cc unit */
129 &gen6_depth_stencil_state, /* must do before cc unit */
130 &gen6_cc_state_pointers,
131
132 &brw_vs_surfaces, /* must do before unit */
133 &brw_wm_constant_surface, /* must do before wm surfaces/bind bo */
134 &brw_wm_surfaces, /* must do before samplers and unit */
135
136 &gen6_vs_state,
137 &gen6_gs_state,
138 &gen6_clip_state,
139 #if 0
140 &brw_wm_samplers,
141
142 &brw_wm_unit,
143 #endif
144 &gen6_sf_state,
145
146 &gen6_scissor_state,
147
148 #if 0
149 /* Command packets:
150 */
151 &brw_invarient_state,
152 #endif
153
154 &brw_state_base_address,
155
156 #if 0
157 &brw_binding_table_pointers,
158 &brw_blend_constant_color,
159 #endif
160
161 &brw_depthbuffer,
162
163 #if 0
164 &brw_polygon_stipple,
165 &brw_polygon_stipple_offset,
166
167 &brw_line_stipple,
168 &brw_aa_line_parameters,
169
170 &brw_psp_urb_cbs,
171 #endif
172
173 &brw_drawing_rect,
174
175 &brw_indices,
176 &brw_index_buffer,
177 &brw_vertices,
178 };
179
/**
 * One-time state-tracker initialization for a new context: sets up the
 * program/state caches.
 */
void brw_init_state(struct brw_context *brw)
{
   brw_init_caches(brw);
}
184
185
/**
 * Tear down state-tracker resources at context destruction: the
 * program/state caches, then the batch cache.
 */
void brw_destroy_state(struct brw_context *brw)
{
   brw_destroy_caches(brw);
   brw_destroy_batch_cache(brw);
}
191
192 /***********************************************************************
193 */
194
195 static GLboolean check_state( const struct brw_state_flags *a,
196 const struct brw_state_flags *b )
197 {
198 return ((a->mesa & b->mesa) ||
199 (a->brw & b->brw) ||
200 (a->cache & b->cache));
201 }
202
203 static void accumulate_state( struct brw_state_flags *a,
204 const struct brw_state_flags *b )
205 {
206 a->mesa |= b->mesa;
207 a->brw |= b->brw;
208 a->cache |= b->cache;
209 }
210
211
212 static void xor_states( struct brw_state_flags *result,
213 const struct brw_state_flags *a,
214 const struct brw_state_flags *b )
215 {
216 result->mesa = a->mesa ^ b->mesa;
217 result->brw = a->brw ^ b->brw;
218 result->cache = a->cache ^ b->cache;
219 }
220
221 void
222 brw_clear_validated_bos(struct brw_context *brw)
223 {
224 int i;
225
226 /* Clear the last round of validated bos */
227 for (i = 0; i < brw->state.validated_bo_count; i++) {
228 dri_bo_unreference(brw->state.validated_bos[i]);
229 brw->state.validated_bos[i] = NULL;
230 }
231 brw->state.validated_bo_count = 0;
232 }
233
/* Bookkeeping for one dirty-flag bit: the bit value, its symbolic name
 * (for INTEL_DEBUG=state output), and how many times it has been seen
 * set.  .name is const: it always points at a string literal produced
 * by DEFINE_BIT, and writing through a literal is undefined behavior.
 */
struct dirty_bit_map {
   uint32_t bit;
   const char *name;
   uint32_t count;
};

/* Build an initializer pairing a bit constant with its own spelling. */
#define DEFINE_BIT(name) {name, #name, 0}
241
/* Zero-terminated table mapping each Mesa-core _NEW_* dirty bit to its
 * name, for the INTEL_DEBUG=state statistics in brw_upload_state().
 */
static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR_MATRIX),
   DEFINE_BIT(_NEW_ACCUM),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_PACKUNPACK),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   {0, 0, 0}	/* terminator */
};
274
/* Zero-terminated table of driver-internal BRW_NEW_* dirty bits, for
 * the INTEL_DEBUG=state statistics in brw_upload_state().
 */
static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_WM_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_DEPTH_BUFFER),
   {0, 0, 0}	/* terminator */
};
293
/* Zero-terminated table of CACHE_NEW_* dirty bits (state-cache
 * results), for the INTEL_DEBUG=state statistics in brw_upload_state().
 */
static struct dirty_bit_map cache_bits[] = {
   DEFINE_BIT(CACHE_NEW_BLEND_STATE),
   DEFINE_BIT(CACHE_NEW_CC_VP),
   DEFINE_BIT(CACHE_NEW_CC_UNIT),
   DEFINE_BIT(CACHE_NEW_WM_PROG),
   DEFINE_BIT(CACHE_NEW_SAMPLER_DEFAULT_COLOR),
   DEFINE_BIT(CACHE_NEW_SAMPLER),
   DEFINE_BIT(CACHE_NEW_WM_UNIT),
   DEFINE_BIT(CACHE_NEW_SF_PROG),
   DEFINE_BIT(CACHE_NEW_SF_VP),
   DEFINE_BIT(CACHE_NEW_SF_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_PROG),
   DEFINE_BIT(CACHE_NEW_GS_UNIT),
   DEFINE_BIT(CACHE_NEW_GS_PROG),
   DEFINE_BIT(CACHE_NEW_CLIP_VP),
   DEFINE_BIT(CACHE_NEW_CLIP_UNIT),
   DEFINE_BIT(CACHE_NEW_CLIP_PROG),
   DEFINE_BIT(CACHE_NEW_SURFACE),
   DEFINE_BIT(CACHE_NEW_SURF_BIND),
   {0, 0, 0}	/* terminator */
};
316
317
318 static void
319 brw_update_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
320 {
321 int i;
322
323 for (i = 0; i < 32; i++) {
324 if (bit_map[i].bit == 0)
325 return;
326
327 if (bit_map[i].bit & bits)
328 bit_map[i].count++;
329 }
330 }
331
332 static void
333 brw_print_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
334 {
335 int i;
336
337 for (i = 0; i < 32; i++) {
338 if (bit_map[i].bit == 0)
339 return;
340
341 fprintf(stderr, "0x%08x: %12d (%s)\n",
342 bit_map[i].bit, bit_map[i].count, bit_map[i].name);
343 }
344 }
345
/***********************************************************************
 * Emit all state:
 */
/**
 * Run the "prepare" stage of every dirty state atom for the current
 * generation's atom list, before any batchbuffer emission happens.
 *
 * Merges Mesa-core dirty flags into the driver's dirty state, picks the
 * Gen6 or Gen4 atom list, detects fragment/vertex program changes, and
 * calls atom->prepare() for each atom whose dirty bits intersect the
 * accumulated state.  Returns early if nothing is dirty.
 */
void brw_validate_state( struct brw_context *brw )
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   brw_clear_validated_bos(brw);

   /* Fold Mesa-core state changes into our dirty flags and consume them. */
   state->mesa |= brw->intel.NewGLState;
   brw->intel.NewGLState = 0;

   brw_add_validated_bo(brw, intel->batch->buf);

   /* Select the atom list for this hardware generation. */
   if (IS_GEN6(intel->intelScreen->deviceID)) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   /* Debug/diagnostic mode: force every atom to re-emit. */
   if (brw->emit_state_always) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   if (brw->fragment_program != ctx->FragmentProgram._Current) {
      brw->fragment_program = ctx->FragmentProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
   }

   if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
   }

   /* Nothing dirty: no atom can match, so skip the prepare walk. */
   if (state->mesa == 0 &&
       state->cache == 0 &&
       state->brw == 0)
      return;

   if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
      brw_clear_batch_cache(brw);

   brw->intel.Fallback = GL_FALSE; /* boolean, not bitfield */

   /* do prepare stage for all atoms; a prepare hook may trigger a
    * software fallback, which aborts the rest of the walk.
    */
   for (i = 0; i < num_atoms; i++) {
      const struct brw_tracked_state *atom = atoms[i];

      if (brw->intel.Fallback)
	 break;

      if (check_state(state, &atom->dirty)) {
	 if (atom->prepare) {
	    atom->prepare(brw);
	 }
      }
   }

   intel_check_front_buffer_rendering(intel);

   /* Make sure that the textures which are referenced by the current
    * brw fragment program are actually present/valid.
    * If this fails, we can experience GPU lock-ups.
    */
   {
      const struct brw_fragment_program *fp;
      fp = brw_fragment_program_const(brw->fragment_program);
      if (fp) {
	 assert((fp->tex_units_used & ctx->Texture._EnabledUnits)
		== fp->tex_units_used);
      }
   }
}
428
429
/**
 * Run the "emit" stage of every dirty state atom, writing state into
 * the batchbuffer, then clear the dirty flags.
 *
 * Must be called after brw_validate_state() has run the prepare stage.
 * Under INTEL_DEBUG a checking variant of the walk asserts that no atom
 * dirties state that an earlier atom already examined (i.e. that the
 * atom list ordering is valid).  With INTEL_DEBUG=state, per-dirty-bit
 * statistics are accumulated and periodically printed.
 */
void brw_upload_state(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   int i;
   static int dirty_count = 0;	/* persists across calls; throttles printing */
   const struct brw_tracked_state **atoms;
   int num_atoms;

   /* Select the atom list for this hardware generation. */
   if (IS_GEN6(intel->intelScreen->deviceID)) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   brw_clear_validated_bos(brw);

   if (INTEL_DEBUG) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];
	 struct brw_state_flags generated;

	 /* An atom with no dirty bits would never be run. */
	 assert(atom->dirty.mesa ||
		atom->dirty.brw ||
		atom->dirty.cache);

	 /* An emit hook may trigger a software fallback, which aborts
	  * the rest of the walk.
	  */
	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }

	 accumulate_state(&examined, &atom->dirty);

	 /* generated = (prev ^ state)
	  * if (examined & generated)
	  *     fail;
	  */
	 xor_states(&generated, &prev, state);
	 assert(!check_state(&examined, &generated));
	 prev = *state;
      }
   }
   else {
      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }
      }
   }

   if (INTEL_DEBUG & DEBUG_STATE) {
      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      brw_update_dirty_count(cache_bits, state->cache);
      /* Print the accumulated statistics once every 1000 uploads. */
      if (dirty_count++ % 1000 == 0) {
	 brw_print_dirty_count(mesa_bits, state->mesa);
	 brw_print_dirty_count(brw_bits, state->brw);
	 brw_print_dirty_count(cache_bits, state->cache);
	 fprintf(stderr, "\n");
      }
   }

   /* On fallback the dirty flags are kept so the atoms re-run once the
    * fallback path ends; otherwise all state is now up to date.
    */
   if (!brw->intel.Fallback)
      memset(state, 0, sizeof(*state));
}