/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_context.h"
#include "brw_state.h"
#include "drivers/common/meta.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"

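/* Each hardware generation has its own ordered list of "state atoms".  An
 * atom pairs the dirty flags it cares about with an emit() callback;
 * brw_upload_state() walks the list in order and re-emits every atom whose
 * dirty bits intersect the state that changed since the last draw, so the
 * ordering below encodes the dependencies called out in the comments.
 *
 * Illustrative sketch of how an individual atom is declared in its own
 * source file (the name and dirty flags here are examples only; the real
 * definitions live alongside each atom's emit code):
 *
 *    const struct brw_tracked_state brw_example_atom = {
 *       .dirty = {
 *          .mesa  = _NEW_BUFFERS,     // core-GL dirty bits
 *          .brw   = BRW_NEW_BATCH,    // driver-internal dirty bits
 *          .cache = 0,                // program-cache dirty bits
 *       },
 *       .emit = brw_emit_example_state,
 *    };
 */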
static const struct brw_tracked_state *gen4_atoms[] =
{
   &brw_vs_prog, /* must do before GS prog, state base address. */
   &brw_ff_gs_prog, /* must do before state base address */

   &brw_interpolation_map,

   &brw_clip_prog, /* must do before state base address */
   &brw_sf_prog, /* must do before state base address */
   &brw_wm_prog, /* must do before state base address */

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_vp,
   &brw_cc_unit,

   /* Surface state setup.  Must come before the VS/WM unit.  The binding
    * table upload must be last.
    */
   &brw_vs_pull_constants,
   &brw_wm_pull_constants,
   &brw_renderbuffer_surfaces,
   &brw_texture_surfaces,
   &brw_vs_binding_table,
   &brw_wm_binding_table,

   &brw_fs_samplers,
   &brw_vs_samplers,

   /* These set up state for brw_psp_urb_cbs */
   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit, /* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invariant_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   &brw_constant_buffer
};

static const struct brw_tracked_state *gen6_atoms[] =
{
   &brw_vs_prog, /* must do before state base address */
   &brw_ff_gs_prog, /* must do before state base address */
   &brw_wm_prog, /* must do before state base address */

   &gen6_clip_vp,
   &gen6_sf_vp,

   /* Command packets: */

   /* must do before binding table pointers, cc state ptrs */
   &brw_state_base_address,

   &brw_cc_vp,
   &gen6_viewport_state, /* must do after *_vp stages */

   &gen6_urb,
   &gen6_blend_state, /* must do before cc unit */
   &gen6_color_calc_state, /* must do before cc unit */
   &gen6_depth_stencil_state, /* must do before cc unit */

   &gen6_vs_push_constants, /* Before vs_state */
   &gen6_wm_push_constants, /* Before wm_state */

   /* Surface state setup.  Must come before the VS/WM unit.  The binding
    * table upload must be last.
    */
   &brw_vs_pull_constants,
   &brw_vs_ubo_surfaces,
   &brw_wm_pull_constants,
   &brw_wm_ubo_surfaces,
   &gen6_renderbuffer_surfaces,
   &brw_texture_surfaces,
   &gen6_sol_surface,
   &brw_vs_binding_table,
   &gen6_gs_binding_table,
   &brw_wm_binding_table,

   &brw_fs_samplers,
   &brw_vs_samplers,
   &gen6_sampler_state,
   &gen6_multisample_state,

   &gen6_vs_state,
   &gen6_gs_state,
   &gen6_clip_state,
   &gen6_sf_state,
   &gen6_wm_state,

   &gen6_scissor_state,

   &gen6_binding_table_pointers,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_drawing_rect,

   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,
};

static const struct brw_tracked_state *gen7_atoms[] =
{
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_wm_prog,

   /* Command packets: */

   /* must do before binding table pointers, cc state ptrs */
   &brw_state_base_address,

   &brw_cc_vp,
   &gen7_cc_viewport_state_pointer, /* must do after brw_cc_vp */
   &gen7_sf_clip_viewport,

   &gen7_push_constant_space,
   &gen7_urb,
   &gen6_blend_state, /* must do before cc unit */
   &gen6_color_calc_state, /* must do before cc unit */
   &gen6_depth_stencil_state, /* must do before cc unit */

   &gen6_vs_push_constants, /* Before vs_state */
   &gen7_gs_push_constants, /* Before gs_state */
   &gen6_wm_push_constants, /* Before wm_surfaces and constant_buffer */

   /* Surface state setup.  Must come before the VS/WM unit.  The binding
    * table upload must be last.
    */
   &brw_vs_pull_constants,
   &brw_vs_ubo_surfaces,
   &brw_vs_abo_surfaces,
   &brw_gs_pull_constants,
   &brw_gs_ubo_surfaces,
   &brw_gs_abo_surfaces,
   &brw_wm_pull_constants,
   &brw_wm_ubo_surfaces,
   &brw_wm_abo_surfaces,
   &gen6_renderbuffer_surfaces,
   &brw_texture_surfaces,
   &brw_vs_binding_table,
   &brw_gs_binding_table,
   &brw_wm_binding_table,

   &brw_fs_samplers,
   &brw_vs_samplers,
   &brw_gs_samplers,
   &gen6_multisample_state,

   &gen7_disable_stages,
   &gen7_vs_state,
   &gen7_gs_state,
   &gen7_sol_state,
   &gen7_clip_state,
   &gen7_sbe_state,
   &gen7_sf_state,
   &gen7_wm_state,
   &gen7_ps_state,

   &gen6_scissor_state,

   &gen7_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_drawing_rect,

   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   &haswell_cut_index,
};

static const struct brw_tracked_state *gen8_atoms[] =
{
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_wm_prog,

   /* Command packets: */
   &gen8_state_base_address,

   &brw_cc_vp,
   &gen7_cc_viewport_state_pointer, /* must do after brw_cc_vp */
   &gen8_sf_clip_viewport,

   &gen7_push_constant_space,
   &gen7_urb,
   &gen8_blend_state,
   &gen6_color_calc_state,

   &gen6_vs_push_constants, /* Before vs_state */
   &gen7_gs_push_constants, /* Before gs_state */
   &gen6_wm_push_constants, /* Before wm_surfaces and constant_buffer */

   /* Surface state setup.  Must come before the VS/WM unit.  The binding
    * table upload must be last.
    */
   &brw_vs_pull_constants,
   &brw_vs_ubo_surfaces,
   &brw_vs_abo_surfaces,
   &brw_gs_pull_constants,
   &brw_gs_ubo_surfaces,
   &brw_gs_abo_surfaces,
   &brw_wm_pull_constants,
   &brw_wm_ubo_surfaces,
   &brw_wm_abo_surfaces,
   &gen6_renderbuffer_surfaces,
   &brw_texture_surfaces,
   &brw_vs_binding_table,
   &brw_gs_binding_table,
   &brw_wm_binding_table,

   &brw_fs_samplers,
   &brw_vs_samplers,
   &brw_gs_samplers,
   &gen8_multisample_state,

   &gen8_disable_stages,
   &gen8_vs_state,
   &gen8_gs_state,
   &gen8_sol_state,
   &gen6_clip_state,
   &gen8_raster_state,
   &gen8_sbe_state,
   &gen8_sf_state,
   &gen8_ps_blend,
   &gen8_ps_extra,
   &gen8_ps_state,
   &gen8_wm_depth_stencil,
   &gen8_wm_state,

   &gen6_scissor_state,

   &gen7_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_drawing_rect,

   &gen8_vf_topology,

   &brw_indices,
   &gen8_index_buffer,
   &gen8_vertices,

   &haswell_cut_index,
};

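/* Gen7 gets a BRW_PIPELINE_COMPUTE atom list in brw_init_state() below, but
 * the list is still empty here: no compute-specific state atoms have been
 * added to it yet.
 */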
static const struct brw_tracked_state *gen7_compute_atoms[] =
{
};


static void
brw_upload_initial_gpu_state(struct brw_context *brw)
{
   /* On platforms with hardware contexts, we can set our initial GPU state
    * right away rather than doing it via state atoms.  This saves a small
    * amount of overhead on every draw call.
    */
   if (!brw->hw_ctx)
      return;

   brw_upload_invariant_state(brw);

   if (brw->gen >= 8) {
      gen8_emit_3dstate_sample_pattern(brw);
   }
}

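/* Select the atom lists for this generation, sanity-check that every atom
 * declares at least one dirty bit and an emit() hook, then flag all state
 * as dirty so the first draw re-emits everything.
 */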
void brw_init_state( struct brw_context *brw )
{
   struct gl_context *ctx = &brw->ctx;
   int i, j;

   brw_init_caches(brw);

   memset(brw->atoms, 0, sizeof(brw->atoms));
   memset(brw->num_atoms, 0, sizeof(brw->num_atoms));

   if (brw->gen >= 8) {
      brw->atoms[BRW_PIPELINE_3D] = gen8_atoms;
      brw->num_atoms[BRW_PIPELINE_3D] = ARRAY_SIZE(gen8_atoms);
   } else if (brw->gen == 7) {
      brw->atoms[BRW_PIPELINE_3D] = gen7_atoms;
      brw->num_atoms[BRW_PIPELINE_3D] = ARRAY_SIZE(gen7_atoms);
      brw->atoms[BRW_PIPELINE_COMPUTE] = gen7_compute_atoms;
      brw->num_atoms[BRW_PIPELINE_COMPUTE] = ARRAY_SIZE(gen7_compute_atoms);
   } else if (brw->gen == 6) {
      brw->atoms[BRW_PIPELINE_3D] = gen6_atoms;
      brw->num_atoms[BRW_PIPELINE_3D] = ARRAY_SIZE(gen6_atoms);
   } else {
      brw->atoms[BRW_PIPELINE_3D] = gen4_atoms;
      brw->num_atoms[BRW_PIPELINE_3D] = ARRAY_SIZE(gen4_atoms);
   }

   for (i = 0; i < BRW_NUM_PIPELINES; i++) {
      for (j = 0; j < brw->num_atoms[i]; j++) {
         assert(brw->atoms[i][j]->dirty.mesa |
                brw->atoms[i][j]->dirty.brw |
                brw->atoms[i][j]->dirty.cache);
         assert(brw->atoms[i][j]->emit);
      }
   }

   brw_upload_initial_gpu_state(brw);

   SET_DIRTY_ALL(mesa);
   SET_DIRTY64_ALL(brw);

   /* Make sure that brw->state.pipeline_dirty[].brw has enough bits to hold
    * all possible dirty flags.
    */
   STATIC_ASSERT(BRW_NUM_STATE_BITS <=
                 8 * sizeof(brw->state.pipeline_dirty[0].brw));

   ctx->DriverFlags.NewTransformFeedback = BRW_NEW_TRANSFORM_FEEDBACK;
   ctx->DriverFlags.NewTransformFeedbackProg = BRW_NEW_TRANSFORM_FEEDBACK;
   ctx->DriverFlags.NewRasterizerDiscard = BRW_NEW_RASTERIZER_DISCARD;
   ctx->DriverFlags.NewUniformBuffer = BRW_NEW_UNIFORM_BUFFER;
   ctx->DriverFlags.NewAtomicBuffer = BRW_NEW_ATOMIC_BUFFER;
}


void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
}

/***********************************************************************
 */

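/* Return true if any dirty flag set in 'a' is also set in 'b'.  This is the
 * test that decides whether a state atom needs to be re-emitted.
 */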
static bool
check_state(const struct brw_state_flags *a, const struct brw_state_flags *b)
{
   return ((a->mesa & b->mesa) |
           (a->brw & b->brw) |
           (a->cache & b->cache)) != 0;
}

static void accumulate_state( struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   a->mesa |= b->mesa;
   a->brw |= b->brw;
   a->cache |= b->cache;
}


static void xor_states( struct brw_state_flags *result,
                        const struct brw_state_flags *a,
                        const struct brw_state_flags *b )
{
   result->mesa = a->mesa ^ b->mesa;
   result->brw = a->brw ^ b->brw;
   result->cache = a->cache ^ b->cache;
}

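/* Bookkeeping for the DEBUG_STATE output in brw_upload_state(): each table
 * below maps a dirty bit to its name and a counter, and the accumulated
 * counts are printed once every 1000 state uploads.
 */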
struct dirty_bit_map {
   uint32_t bit;
   char *name;
   uint32_t count;
};

#define DEFINE_BIT(name) {name, #name, 0}

static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_CURRENT_ATTRIB),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   DEFINE_BIT(_NEW_BUFFER_OBJECT),
   DEFINE_BIT(_NEW_FRAG_CLAMP),
   DEFINE_BIT(_NEW_VARYING_VP_INPUTS),
   {0, 0, 0}
};

static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_GEOMETRY_PROGRAM),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_SURFACES),
   DEFINE_BIT(BRW_NEW_VS_BINDING_TABLE),
   DEFINE_BIT(BRW_NEW_GS_BINDING_TABLE),
   DEFINE_BIT(BRW_NEW_PS_BINDING_TABLE),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_GS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_PROGRAM_CACHE),
   DEFINE_BIT(BRW_NEW_STATE_BASE_ADDRESS),
   DEFINE_BIT(BRW_NEW_VUE_MAP_VS),
   DEFINE_BIT(BRW_NEW_VUE_MAP_GEOM_OUT),
   DEFINE_BIT(BRW_NEW_TRANSFORM_FEEDBACK),
   DEFINE_BIT(BRW_NEW_RASTERIZER_DISCARD),
   DEFINE_BIT(BRW_NEW_STATS_WM),
   DEFINE_BIT(BRW_NEW_UNIFORM_BUFFER),
   DEFINE_BIT(BRW_NEW_ATOMIC_BUFFER),
   DEFINE_BIT(BRW_NEW_META_IN_PROGRESS),
   DEFINE_BIT(BRW_NEW_INTERPOLATION_MAP),
   DEFINE_BIT(BRW_NEW_PUSH_CONSTANT_ALLOCATION),
   DEFINE_BIT(BRW_NEW_NUM_SAMPLES),
   {0, 0, 0}
};

static struct dirty_bit_map cache_bits[] = {
   DEFINE_BIT(CACHE_NEW_CC_VP),
   DEFINE_BIT(CACHE_NEW_CC_UNIT),
   DEFINE_BIT(CACHE_NEW_WM_PROG),
   DEFINE_BIT(CACHE_NEW_BLORP_BLIT_PROG),
   DEFINE_BIT(CACHE_NEW_BLORP_CONST_COLOR_PROG),
   DEFINE_BIT(CACHE_NEW_SAMPLER),
   DEFINE_BIT(CACHE_NEW_WM_UNIT),
   DEFINE_BIT(CACHE_NEW_SF_PROG),
   DEFINE_BIT(CACHE_NEW_SF_VP),
   DEFINE_BIT(CACHE_NEW_SF_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_PROG),
   DEFINE_BIT(CACHE_NEW_FF_GS_UNIT),
   DEFINE_BIT(CACHE_NEW_FF_GS_PROG),
   DEFINE_BIT(CACHE_NEW_GS_PROG),
   DEFINE_BIT(CACHE_NEW_CLIP_VP),
   DEFINE_BIT(CACHE_NEW_CLIP_UNIT),
   DEFINE_BIT(CACHE_NEW_CLIP_PROG),
   {0, 0, 0}
};


static void
brw_update_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
{
   int i;

   for (i = 0; i < 32; i++) {
      if (bit_map[i].bit == 0)
         return;

      if (bit_map[i].bit & bits)
         bit_map[i].count++;
   }
}

static void
brw_print_dirty_count(struct dirty_bit_map *bit_map)
{
   int i;

   for (i = 0; i < 32; i++) {
      if (bit_map[i].bit == 0)
         return;

      fprintf(stderr, "0x%08x: %12d (%s)\n",
              bit_map[i].bit, bit_map[i].count, bit_map[i].name);
   }
}

/***********************************************************************
 * Emit all state:
 */
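/* Fold the accumulated GL state changes (brw->NewGLState) and driver state
 * changes (ctx->NewDriverState) into this pipeline's dirty flags, then walk
 * the pipeline's atom list in order and call emit() on every atom whose
 * dirty bits intersect the accumulated state.
 */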
void brw_upload_state(struct brw_context *brw, brw_pipeline pipeline)
{
   struct gl_context *ctx = &brw->ctx;
   struct brw_state_flags *state = &brw->state.pipeline_dirty[pipeline];
   int i;
   static int dirty_count = 0;

   assert(0 <= pipeline && pipeline < BRW_NUM_PIPELINES);
   brw->state.current_pipeline = pipeline;

   SET_DIRTY_BIT(mesa, brw->NewGLState);
   brw->NewGLState = 0;

   SET_DIRTY_BIT(brw, ctx->NewDriverState);
   ctx->NewDriverState = 0;

   if (0) {
      /* Always re-emit all state. */
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   if (brw->fragment_program != ctx->FragmentProgram._Current) {
      brw->fragment_program = ctx->FragmentProgram._Current;
      SET_DIRTY_BIT(brw, BRW_NEW_FRAGMENT_PROGRAM);
   }

   if (brw->geometry_program != ctx->GeometryProgram._Current) {
      brw->geometry_program = ctx->GeometryProgram._Current;
      SET_DIRTY_BIT(brw, BRW_NEW_GEOMETRY_PROGRAM);
   }

   if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
      SET_DIRTY_BIT(brw, BRW_NEW_VERTEX_PROGRAM);
   }

   if (brw->meta_in_progress != _mesa_meta_in_progress(ctx)) {
      brw->meta_in_progress = _mesa_meta_in_progress(ctx);
      SET_DIRTY_BIT(brw, BRW_NEW_META_IN_PROGRESS);
   }

   if (brw->num_samples != ctx->DrawBuffer->Visual.samples) {
      brw->num_samples = ctx->DrawBuffer->Visual.samples;
      SET_DIRTY_BIT(brw, BRW_NEW_NUM_SAMPLES);
   }

   if ((state->mesa | state->cache | state->brw) == 0)
      return;

   if (unlikely(INTEL_DEBUG)) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < brw->num_atoms[pipeline]; i++) {
         const struct brw_tracked_state *atom = brw->atoms[pipeline][i];
         struct brw_state_flags generated;

         if (check_state(state, &atom->dirty)) {
            atom->emit(brw);
         }

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ state)
          * if (examined & generated)
          *    fail;
          */
         xor_states(&generated, &prev, state);
         assert(!check_state(&examined, &generated));
         prev = *state;
      }
   }
   else {
      for (i = 0; i < brw->num_atoms[pipeline]; i++) {
         const struct brw_tracked_state *atom = brw->atoms[pipeline][i];

         if (check_state(state, &atom->dirty)) {
            atom->emit(brw);
         }
      }
   }

   if (unlikely(INTEL_DEBUG & DEBUG_STATE)) {
      STATIC_ASSERT(ARRAY_SIZE(brw_bits) == BRW_NUM_STATE_BITS + 1);
      STATIC_ASSERT(ARRAY_SIZE(cache_bits) == BRW_MAX_CACHE + 1);

      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      brw_update_dirty_count(cache_bits, state->cache);
      if (dirty_count++ % 1000 == 0) {
         brw_print_dirty_count(mesa_bits);
         brw_print_dirty_count(brw_bits);
         brw_print_dirty_count(cache_bits);
         fprintf(stderr, "\n");
      }
   }
}


/**
 * Clear dirty bits to account for the fact that the state emitted by
 * brw_upload_state() has been committed to the hardware.  This is a separate
 * call from brw_upload_state() because it's possible that after the call to
 * brw_upload_state(), we will discover that we've run out of aperture space,
 * and need to rewind the batch buffer to the state it had before the
 * brw_upload_state() call.
 */
void
brw_clear_dirty_bits(struct brw_context *brw, brw_pipeline pipeline)
{
   struct brw_state_flags *state = &brw->state.pipeline_dirty[pipeline];
   memset(state, 0, sizeof(*state));
}