i965: Set the CC VP state immediately on state change.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_state_upload.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33
34 #include "brw_context.h"
35 #include "brw_state.h"
36 #include "intel_batchbuffer.h"
37 #include "intel_buffers.h"
38 #include "intel_chipset.h"
39
/* This is used to initialize brw->state.atoms[].  We could use this
 * list directly except for a single atom, brw_constant_buffer, which
 * has a .dirty value which changes according to the parameters of the
 * current fragment and vertex programs, and so cannot be a static
 * value.
 *
 * Atoms run in list order, so an atom may rely on state produced by
 * the atoms before it (see the inline ordering notes below).
 */
static const struct brw_tracked_state *gen4_atoms[] =
{
   &brw_check_fallback,

   /* Compile/look up all programs first: URB layout below depends on
    * the resulting entry sizes.
    */
   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_unit,

   &brw_vs_surfaces,		/* must do before unit */
   &brw_wm_constant_surface,	/* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,		/* must do before samplers and unit */
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,		/* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   /* Last: its .dirty value varies with the current programs (see the
    * comment at the top of this list).
    */
   &brw_constant_buffer
};
103
/* Atom list for Sandybridge (gen6); same contract as gen4_atoms:
 * atoms run in order and may depend on state from earlier atoms.
 *
 * NOTE(review): unlike gen4_atoms this array is not static — confirm
 * whether another file references it; if not, it should be static too.
 */
const struct brw_tracked_state *gen6_atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_wm_prog,

   &gen6_clip_vp,
   &gen6_sf_vp,
   &gen6_cc_vp,

   /* Command packets: */
   &brw_invarient_state,

   &gen6_viewport_state,	/* must do after *_vp stages */

   &gen6_urb,
   &gen6_blend_state,		/* must do before cc unit */
   &gen6_color_calc_state,	/* must do before cc unit */
   &gen6_depth_stencil_state,	/* must do before cc unit */
   &gen6_cc_state_pointers,

   &brw_vs_surfaces,		/* must do before unit */
   &brw_wm_constant_surface,	/* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,		/* must do before samplers and unit */

   &brw_wm_samplers,
   &gen6_sampler_state,

   &gen6_vs_state,
   &gen6_gs_state,
   &gen6_clip_state,
   &gen6_sf_state,
   &gen6_wm_state,

   &gen6_scissor_state,

   &brw_state_base_address,

   &gen6_binding_table_pointers,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_drawing_rect,

   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,
};
161
/* One-time state-tracker initialization for a new context: sets up
 * the program/unit caches used by the state atoms.
 */
void brw_init_state( struct brw_context *brw )
{
   brw_init_caches(brw);
}
166
167
/* Context teardown counterpart of brw_init_state(): frees the program
 * caches and the batch cache.
 */
void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
   brw_destroy_batch_cache(brw);
}
173
174 /***********************************************************************
175 */
176
177 static GLboolean check_state( const struct brw_state_flags *a,
178 const struct brw_state_flags *b )
179 {
180 return ((a->mesa & b->mesa) ||
181 (a->brw & b->brw) ||
182 (a->cache & b->cache));
183 }
184
185 static void accumulate_state( struct brw_state_flags *a,
186 const struct brw_state_flags *b )
187 {
188 a->mesa |= b->mesa;
189 a->brw |= b->brw;
190 a->cache |= b->cache;
191 }
192
193
194 static void xor_states( struct brw_state_flags *result,
195 const struct brw_state_flags *a,
196 const struct brw_state_flags *b )
197 {
198 result->mesa = a->mesa ^ b->mesa;
199 result->brw = a->brw ^ b->brw;
200 result->cache = a->cache ^ b->cache;
201 }
202
203 void
204 brw_clear_validated_bos(struct brw_context *brw)
205 {
206 int i;
207
208 /* Clear the last round of validated bos */
209 for (i = 0; i < brw->state.validated_bo_count; i++) {
210 drm_intel_bo_unreference(brw->state.validated_bos[i]);
211 brw->state.validated_bos[i] = NULL;
212 }
213 brw->state.validated_bo_count = 0;
214 }
215
216 struct dirty_bit_map {
217 uint32_t bit;
218 char *name;
219 uint32_t count;
220 };
221
222 #define DEFINE_BIT(name) {name, #name, 0}
223
/* Statistics table for core-Mesa (_NEW_*) dirty bits; terminated by a
 * zeroed entry, as required by brw_update_dirty_count().
 */
static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR_MATRIX),
   DEFINE_BIT(_NEW_ACCUM),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_PACKUNPACK),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   {0, 0, 0}
};
256
/* Statistics table for driver-internal (BRW_NEW_*) dirty bits;
 * terminated by a zeroed entry.
 */
static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_WM_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_DEPTH_BUFFER),
   {0, 0, 0}
};
275
/* Statistics table for state-cache (CACHE_NEW_*) dirty bits;
 * terminated by a zeroed entry.
 */
static struct dirty_bit_map cache_bits[] = {
   DEFINE_BIT(CACHE_NEW_BLEND_STATE),
   DEFINE_BIT(CACHE_NEW_CC_VP),
   DEFINE_BIT(CACHE_NEW_CC_UNIT),
   DEFINE_BIT(CACHE_NEW_WM_PROG),
   DEFINE_BIT(CACHE_NEW_SAMPLER_DEFAULT_COLOR),
   DEFINE_BIT(CACHE_NEW_SAMPLER),
   DEFINE_BIT(CACHE_NEW_WM_UNIT),
   DEFINE_BIT(CACHE_NEW_SF_PROG),
   DEFINE_BIT(CACHE_NEW_SF_VP),
   DEFINE_BIT(CACHE_NEW_SF_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_PROG),
   DEFINE_BIT(CACHE_NEW_GS_UNIT),
   DEFINE_BIT(CACHE_NEW_GS_PROG),
   DEFINE_BIT(CACHE_NEW_CLIP_VP),
   DEFINE_BIT(CACHE_NEW_CLIP_UNIT),
   DEFINE_BIT(CACHE_NEW_CLIP_PROG),
   DEFINE_BIT(CACHE_NEW_SURFACE),
   DEFINE_BIT(CACHE_NEW_SURF_BIND),
   {0, 0, 0}
};
298
299
300 static void
301 brw_update_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
302 {
303 int i;
304
305 for (i = 0; i < 32; i++) {
306 if (bit_map[i].bit == 0)
307 return;
308
309 if (bit_map[i].bit & bits)
310 bit_map[i].count++;
311 }
312 }
313
314 static void
315 brw_print_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
316 {
317 int i;
318
319 for (i = 0; i < 32; i++) {
320 if (bit_map[i].bit == 0)
321 return;
322
323 fprintf(stderr, "0x%08x: %12d (%s)\n",
324 bit_map[i].bit, bit_map[i].count, bit_map[i].name);
325 }
326 }
327
/***********************************************************************
 * Run the "prepare" stage of all state atoms for the current draw:
 * accumulate dirty flags, pick the per-generation atom list, and call
 * each matching atom's prepare() hook.  The emit() stage runs later in
 * brw_upload_state().
 */
void brw_validate_state( struct brw_context *brw )
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   brw_clear_validated_bos(brw);

   /* Fold core Mesa's accumulated state changes into our dirty set. */
   state->mesa |= brw->intel.NewGLState;
   brw->intel.NewGLState = 0;

   brw_add_validated_bo(brw, intel->batch->buf);

   /* Choose the atom list for this hardware generation. */
   if (IS_GEN6(intel->intelScreen->deviceID)) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   if (brw->emit_state_always) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   /* Track program changes ourselves: core Mesa's _NEW_PROGRAM is not
    * fine-grained enough for the driver's needs.
    */
   if (brw->fragment_program != ctx->FragmentProgram._Current) {
      brw->fragment_program = ctx->FragmentProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
   }

   if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
   }

   /* Nothing dirty at all: skip the whole prepare pass. */
   if (state->mesa == 0 &&
       state->cache == 0 &&
       state->brw == 0)
      return;

   if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
      brw_clear_batch_cache(brw);

   brw->intel.Fallback = GL_FALSE; /* boolean, not bitfield */

   /* do prepare stage for all atoms */
   for (i = 0; i < num_atoms; i++) {
      const struct brw_tracked_state *atom = atoms[i];

      /* An atom's prepare() may trigger a software fallback; stop the
       * pass as soon as one does.
       */
      if (brw->intel.Fallback)
	 break;

      if (check_state(state, &atom->dirty)) {
	 if (atom->prepare) {
	    atom->prepare(brw);
	 }
      }
   }

   intel_check_front_buffer_rendering(intel);

   /* Make sure that the textures which are referenced by the current
    * brw fragment program are actually present/valid.
    * If this fails, we can experience GPU lock-ups.
    */
   {
      const struct brw_fragment_program *fp;
      fp = brw_fragment_program_const(brw->fragment_program);
      if (fp) {
	 assert((fp->tex_units_used & ctx->Texture._EnabledUnits)
		== fp->tex_units_used);
      }
   }
}
410
411
/* Run the "emit" stage of all state atoms whose dirty bits intersect
 * the current dirty set, then clear the dirty flags (unless a fallback
 * occurred).  Must run after brw_validate_state()'s prepare pass.
 * With INTEL_DEBUG set, additionally verifies that no atom dirties
 * state that an earlier atom already consumed (an ordering bug).
 */
void brw_upload_state(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   int i;
   static int dirty_count = 0;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   /* Choose the atom list for this hardware generation. */
   if (IS_GEN6(intel->intelScreen->deviceID)) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   brw_clear_validated_bos(brw);

   if (INTEL_DEBUG) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];
	 struct brw_state_flags generated;

	 /* Every atom must declare at least one dirty bit. */
	 assert(atom->dirty.mesa ||
		atom->dirty.brw ||
		atom->dirty.cache);

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }

	 accumulate_state(&examined, &atom->dirty);

	 /* generated = (prev ^ state)
	  * if (examined & generated)
	  *     fail;
	  */
	 xor_states(&generated, &prev, state);
	 assert(!check_state(&examined, &generated));
	 prev = *state;
      }
   }
   else {
      /* Fast path: no ordering checks. */
      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }
      }
   }

   if (INTEL_DEBUG & DEBUG_STATE) {
      /* Accumulate per-bit statistics and print them every 1000 uploads. */
      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      brw_update_dirty_count(cache_bits, state->cache);
      if (dirty_count++ % 1000 == 0) {
	 brw_print_dirty_count(mesa_bits, state->mesa);
	 brw_print_dirty_count(brw_bits, state->brw);
	 brw_print_dirty_count(cache_bits, state->cache);
	 fprintf(stderr, "\n");
      }
   }

   /* Keep the dirty flags if we fell back, so the state is re-emitted
    * on the next attempt.
    */
   if (!brw->intel.Fallback)
      memset(state, 0, sizeof(*state));
}