i965: Micro-optimise check_state
[mesa.git] / src / mesa / drivers / dri / i965 / brw_state_upload.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */



#include "brw_context.h"
#include "brw_state.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"

/* This list is used to initialize brw->state.atoms[].  We could use it
 * directly except for a single atom, brw_constant_buffer, whose .dirty
 * value changes according to the parameters of the current fragment
 * and vertex programs, and so cannot be a static value.
 */
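/* Each entry is a "state atom".  A rough sketch of the structure,
 * assuming the definition in brw_state.h (see that header for the
 * authoritative version):
 *
 *    struct brw_tracked_state {
 *       struct brw_state_flags dirty;    // flags that trigger this atom
 *       void (*prepare)( struct brw_context *brw );
 *       void (*emit)( struct brw_context *brw );
 *    };
 *
 * brw_validate_state() below runs prepare() and brw_upload_state()
 * runs emit() for every atom whose dirty flags intersect the
 * accumulated dirty state.
 */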
static const struct brw_tracked_state *gen4_atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_unit,

   &brw_vs_constants, /* Before vs_surfaces and constant_buffer */
   &brw_wm_constants, /* Before wm_surfaces and constant_buffer */

   &brw_vs_surfaces,          /* must do before unit */
   &brw_wm_constant_surface,  /* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,          /* must do before samplers and unit */
   &brw_wm_binding_table,
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,              /* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   &brw_constant_buffer
};

const struct brw_tracked_state *gen6_atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_wm_prog,

   &gen6_clip_vp,
   &gen6_sf_vp,

   /* Command packets: */
   &brw_invarient_state,

   &gen6_viewport_state,      /* must do after *_vp stages */

   &gen6_urb,
   &gen6_blend_state,         /* must do before cc unit */
   &gen6_color_calc_state,    /* must do before cc unit */
   &gen6_depth_stencil_state, /* must do before cc unit */
   &gen6_cc_state_pointers,

   &brw_vs_constants, /* Before vs_surfaces and constant_buffer */
   &brw_wm_constants, /* Before wm_surfaces and constant_buffer */
   &gen6_wm_constants, /* Before wm_state */

   &brw_vs_surfaces,          /* must do before unit */
   &brw_wm_constant_surface,  /* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,          /* must do before samplers and unit */
   &brw_wm_binding_table,

   &brw_wm_samplers,
   &gen6_sampler_state,

   &gen6_vs_state,
   &gen6_gs_state,
   &gen6_clip_state,
   &gen6_sf_state,
   &gen6_wm_state,

   &gen6_scissor_state,
   &gen6_scissor_state_pointers,

   &brw_state_base_address,

   &gen6_binding_table_pointers,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_drawing_rect,

   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,
};

void brw_init_state( struct brw_context *brw )
{
   brw_init_caches(brw);
}


void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
}

/***********************************************************************
 */

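/* Check whether the dirty flags tracked by 'a' intersect those in 'b'.
 *
 * Micro-optimisation: OR the three masked words together instead of
 * short-circuiting three separate comparisons; callers only test the
 * combined result for non-zero, which should let the compiler emit
 * branch-free code for the common case.
 */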
static GLuint check_state( const struct brw_state_flags *a,
                           const struct brw_state_flags *b )
{
   return ((a->mesa & b->mesa) |
           (a->brw & b->brw) |
           (a->cache & b->cache));
}

static void accumulate_state( struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   a->mesa |= b->mesa;
   a->brw |= b->brw;
   a->cache |= b->cache;
}


static void xor_states( struct brw_state_flags *result,
                        const struct brw_state_flags *a,
                        const struct brw_state_flags *b )
{
   result->mesa = a->mesa ^ b->mesa;
   result->brw = a->brw ^ b->brw;
   result->cache = a->cache ^ b->cache;
}

void
brw_clear_validated_bos(struct brw_context *brw)
{
   int i;

   /* Clear the last round of validated bos */
   for (i = 0; i < brw->state.validated_bo_count; i++) {
      drm_intel_bo_unreference(brw->state.validated_bos[i]);
      brw->state.validated_bos[i] = NULL;
   }
   brw->state.validated_bo_count = 0;
}

struct dirty_bit_map {
   uint32_t bit;
   const char *name;
   uint32_t count;
};

#define DEFINE_BIT(name) {name, #name, 0}
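/* For example, DEFINE_BIT(_NEW_COLOR) expands to
 * { _NEW_COLOR, "_NEW_COLOR", 0 }: the bit, its printable name, and a
 * zeroed hit counter.
 */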

static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_PACKUNPACK),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   {0, 0, 0}
};

static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_WM_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_WM_SURFACES),
   DEFINE_BIT(BRW_NEW_BINDING_TABLE),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_DEPTH_BUFFER),
   DEFINE_BIT(BRW_NEW_NR_WM_SURFACES),
   DEFINE_BIT(BRW_NEW_NR_VS_SURFACES),
   DEFINE_BIT(BRW_NEW_VS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_WM_CONSTBUF),
   {0, 0, 0}
};

static struct dirty_bit_map cache_bits[] = {
   DEFINE_BIT(CACHE_NEW_BLEND_STATE),
   DEFINE_BIT(CACHE_NEW_CC_VP),
   DEFINE_BIT(CACHE_NEW_CC_UNIT),
   DEFINE_BIT(CACHE_NEW_WM_PROG),
   DEFINE_BIT(CACHE_NEW_SAMPLER_DEFAULT_COLOR),
   DEFINE_BIT(CACHE_NEW_SAMPLER),
   DEFINE_BIT(CACHE_NEW_WM_UNIT),
   DEFINE_BIT(CACHE_NEW_SF_PROG),
   DEFINE_BIT(CACHE_NEW_SF_VP),
   DEFINE_BIT(CACHE_NEW_SF_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_PROG),
   DEFINE_BIT(CACHE_NEW_GS_UNIT),
   DEFINE_BIT(CACHE_NEW_GS_PROG),
   DEFINE_BIT(CACHE_NEW_CLIP_VP),
   DEFINE_BIT(CACHE_NEW_CLIP_UNIT),
   DEFINE_BIT(CACHE_NEW_CLIP_PROG),
   {0, 0, 0}
};

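/* Bump the counter for each bit set in 'bits'.  brw_upload_state()
 * calls this once per upload and dumps the totals every 1000th upload
 * when the DEBUG_STATE flag is set in INTEL_DEBUG.
 */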
static void
brw_update_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
{
   int i;

   for (i = 0; i < 32; i++) {
      if (bit_map[i].bit == 0)
         return;

      if (bit_map[i].bit & bits)
         bit_map[i].count++;
   }
}

static void
brw_print_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
{
   int i;

   for (i = 0; i < 32; i++) {
      if (bit_map[i].bit == 0)
         return;

      fprintf(stderr, "0x%08x: %12d (%s)\n",
              bit_map[i].bit, bit_map[i].count, bit_map[i].name);
   }
}

/***********************************************************************
 * Prepare all dirty state (runs the prepare stage of each dirty atom):
 */
void brw_validate_state( struct brw_context *brw )
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   brw_clear_validated_bos(brw);

   state->mesa |= brw->intel.NewGLState;
   brw->intel.NewGLState = 0;

   brw_add_validated_bo(brw, intel->batch.bo);

   if (intel->gen >= 6) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   if (brw->emit_state_always) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   if (brw->fragment_program != ctx->FragmentProgram._Current) {
      brw->fragment_program = ctx->FragmentProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
   }

   if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
   }

   if ((state->mesa | state->cache | state->brw) == 0)
      return;

   brw->intel.Fallback = GL_FALSE; /* boolean, not bitfield */

   /* do prepare stage for all atoms */
   for (i = 0; i < num_atoms; i++) {
      const struct brw_tracked_state *atom = atoms[i];

      if (brw->intel.Fallback)
         break;

      if (check_state(state, &atom->dirty)) {
         if (atom->prepare) {
            atom->prepare(brw);
         }
      }
   }

   intel_check_front_buffer_rendering(intel);

   /* Make sure that the textures which are referenced by the current
    * brw fragment program are actually present/valid.
    * If this fails, we can experience GPU lock-ups.
    */
   {
      const struct brw_fragment_program *fp;
      fp = brw_fragment_program_const(brw->fragment_program);
      if (fp) {
         assert((fp->tex_units_used & ctx->Texture._EnabledUnits)
                == fp->tex_units_used);
      }
   }
}

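/***********************************************************************
 * Emit all dirty state into the batch (runs the emit stage of each
 * dirty atom).  Under INTEL_DEBUG this also asserts that no atom emits
 * state which an earlier atom in the list has already examined, i.e.
 * that the atom lists above are ordered correctly.
 */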
void brw_upload_state(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   int i;
   static int dirty_count = 0;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   if (intel->gen >= 6) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   brw_clear_validated_bos(brw);

   if (unlikely(INTEL_DEBUG)) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < num_atoms; i++) {
         const struct brw_tracked_state *atom = atoms[i];
         struct brw_state_flags generated;

         assert(atom->dirty.mesa ||
                atom->dirty.brw ||
                atom->dirty.cache);

         if (brw->intel.Fallback)
            break;

         if (check_state(state, &atom->dirty)) {
            if (atom->emit) {
               atom->emit( brw );
            }
         }

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ state)
          * if (examined & generated)
          *    fail;
          */
         xor_states(&generated, &prev, state);
         assert(!check_state(&examined, &generated));
         prev = *state;
      }
   }
   else {
      for (i = 0; i < num_atoms; i++) {
         const struct brw_tracked_state *atom = atoms[i];

         if (brw->intel.Fallback)
            break;

         if (check_state(state, &atom->dirty)) {
            if (atom->emit) {
               atom->emit( brw );
            }
         }
      }
   }

   if (unlikely(INTEL_DEBUG & DEBUG_STATE)) {
      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      brw_update_dirty_count(cache_bits, state->cache);
      if (dirty_count++ % 1000 == 0) {
         brw_print_dirty_count(mesa_bits, state->mesa);
         brw_print_dirty_count(brw_bits, state->brw);
         brw_print_dirty_count(cache_bits, state->cache);
         fprintf(stderr, "\n");
      }
   }

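   /* On fallback, keep the dirty flags so the skipped state is
    * re-examined when hardware rendering resumes.
    */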
   if (!brw->intel.Fallback)
      memset(state, 0, sizeof(*state));
}