/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */

#include <inttypes.h>

#include "brw_context.h"
#include "brw_state.h"
#include "drivers/common/meta.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"

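/* Each brw_tracked_state "atom" pairs a set of dirty-flag dependencies with
 * an emit() function.  The per-generation lists below define the order in
 * which atoms are processed on each draw; ordering constraints are noted in
 * the comments on individual entries.
 */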
static const struct brw_tracked_state *gen4_atoms[] =
{
   &brw_vs_prog, /* must do before GS prog, state base address. */
   &brw_ff_gs_prog, /* must do before state base address */

   &brw_interpolation_map,

   &brw_clip_prog, /* must do before state base address */
   &brw_sf_prog, /* must do before state base address */
   &brw_wm_prog, /* must do before state base address */

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_vp,
   &brw_cc_unit,

   /* Surface state setup.  Must come before the VS/WM unit.  The binding
    * table upload must be last.
    */
   &brw_vs_pull_constants,
   &brw_wm_pull_constants,
   &brw_renderbuffer_surfaces,
   &brw_texture_surfaces,
   &brw_vs_binding_table,
   &brw_wm_binding_table,

   &brw_fs_samplers,
   &brw_vs_samplers,

   /* These set up state for brw_psp_urb_cbs */
   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit, /* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invariant_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices, /* must come before brw_vertices */
   &brw_index_buffer,
   &brw_vertices,

   &brw_constant_buffer
};

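/* State atom list for Sandybridge (gen6). */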
static const struct brw_tracked_state *gen6_atoms[] =
{
   &brw_vs_prog, /* must do before state base address */
   &brw_gs_prog, /* must do before state base address */
   &brw_wm_prog, /* must do before state base address */

   &gen6_clip_vp,
   &gen6_sf_vp,

   /* Command packets: */

   /* must do before binding table pointers, cc state ptrs */
   &brw_state_base_address,

   &brw_cc_vp,
   &gen6_viewport_state, /* must do after *_vp stages */

   &gen6_urb,
   &gen6_blend_state, /* must do before cc unit */
   &gen6_color_calc_state, /* must do before cc unit */
   &gen6_depth_stencil_state, /* must do before cc unit */

   &gen6_vs_push_constants, /* Before vs_state */
   &gen6_gs_push_constants, /* Before gs_state */
   &gen6_wm_push_constants, /* Before wm_state */

   /* Surface state setup.  Must come before the VS/WM unit.  The binding
    * table upload must be last.
    */
   &brw_vs_pull_constants,
   &brw_vs_ubo_surfaces,
   &brw_gs_pull_constants,
   &brw_gs_ubo_surfaces,
   &brw_wm_pull_constants,
   &brw_wm_ubo_surfaces,
   &gen6_renderbuffer_surfaces,
   &brw_texture_surfaces,
   &gen6_sol_surface,
   &brw_vs_binding_table,
   &gen6_gs_binding_table,
   &brw_wm_binding_table,

   &brw_fs_samplers,
   &brw_vs_samplers,
   &brw_gs_samplers,
   &gen6_sampler_state,
   &gen6_multisample_state,

   &gen6_vs_state,
   &gen6_gs_state,
   &gen6_clip_state,
   &gen6_sf_state,
   &gen6_wm_state,

   &gen6_scissor_state,

   &gen6_binding_table_pointers,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_drawing_rect,

   &brw_indices, /* must come before brw_vertices */
   &brw_index_buffer,
   &brw_vertices,
};

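/* State atom list for Ivybridge/Haswell (gen7). */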
static const struct brw_tracked_state *gen7_atoms[] =
{
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_wm_prog,

   /* Command packets: */

   /* must do before binding table pointers, cc state ptrs */
   &brw_state_base_address,

   &brw_cc_vp,
   &gen7_sf_clip_viewport,

   &gen7_push_constant_space,
   &gen7_urb,
   &gen6_blend_state, /* must do before cc unit */
   &gen6_color_calc_state, /* must do before cc unit */
   &gen6_depth_stencil_state, /* must do before cc unit */

   &gen6_vs_push_constants, /* Before vs_state */
   &gen6_gs_push_constants, /* Before gs_state */
   &gen6_wm_push_constants, /* Before wm_surfaces and constant_buffer */

   /* Surface state setup.  Must come before the VS/WM unit.  The binding
    * table upload must be last.
    */
   &brw_vs_pull_constants,
   &brw_vs_ubo_surfaces,
   &brw_vs_abo_surfaces,
   &brw_gs_pull_constants,
   &brw_gs_ubo_surfaces,
   &brw_gs_abo_surfaces,
   &brw_wm_pull_constants,
   &brw_wm_ubo_surfaces,
   &brw_wm_abo_surfaces,
   &gen6_renderbuffer_surfaces,
   &brw_texture_surfaces,
   &brw_vs_binding_table,
   &brw_gs_binding_table,
   &brw_wm_binding_table,

   &brw_fs_samplers,
   &brw_vs_samplers,
   &brw_gs_samplers,
   &gen6_multisample_state,

   &gen7_disable_stages,
   &gen7_vs_state,
   &gen7_gs_state,
   &gen7_sol_state,
   &gen7_clip_state,
   &gen7_sbe_state,
   &gen7_sf_state,
   &gen7_wm_state,
   &gen7_ps_state,

   &gen6_scissor_state,

   &gen7_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_drawing_rect,

   &brw_indices, /* must come before brw_vertices */
   &brw_index_buffer,
   &brw_vertices,

   &haswell_cut_index,
};

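/* State atom list for Broadwell (gen8). */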
static const struct brw_tracked_state *gen8_atoms[] =
{
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_wm_prog,

   /* Command packets: */
   &gen8_state_base_address,

   &brw_cc_vp,
   &gen8_sf_clip_viewport,

   &gen7_push_constant_space,
   &gen7_urb,
   &gen8_blend_state,
   &gen6_color_calc_state,

   &gen6_vs_push_constants, /* Before vs_state */
   &gen6_gs_push_constants, /* Before gs_state */
   &gen6_wm_push_constants, /* Before wm_surfaces and constant_buffer */

   /* Surface state setup.  Must come before the VS/WM unit.  The binding
    * table upload must be last.
    */
   &brw_vs_pull_constants,
   &brw_vs_ubo_surfaces,
   &brw_vs_abo_surfaces,
   &brw_gs_pull_constants,
   &brw_gs_ubo_surfaces,
   &brw_gs_abo_surfaces,
   &brw_wm_pull_constants,
   &brw_wm_ubo_surfaces,
   &brw_wm_abo_surfaces,
   &gen6_renderbuffer_surfaces,
   &brw_texture_surfaces,
   &brw_vs_binding_table,
   &brw_gs_binding_table,
   &brw_wm_binding_table,

   &brw_fs_samplers,
   &brw_vs_samplers,
   &brw_gs_samplers,
   &gen8_multisample_state,

   &gen8_disable_stages,
   &gen8_vs_state,
   &gen8_gs_state,
   &gen8_sol_state,
   &gen6_clip_state,
   &gen8_raster_state,
   &gen8_sbe_state,
   &gen8_sf_state,
   &gen8_ps_blend,
   &gen8_ps_extra,
   &gen8_ps_state,
   &gen8_wm_depth_stencil,
   &gen8_wm_state,

   &gen6_scissor_state,

   &gen7_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_drawing_rect,

   &gen8_vf_topology,

   &brw_indices,
   &gen8_index_buffer,
   &gen8_vertices,

   &haswell_cut_index,
   &gen8_pma_fix,
};

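/**
 * Upload GPU state that only needs to be set once per hardware context,
 * rather than being re-emitted by a state atom on every draw.
 */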
static void
brw_upload_initial_gpu_state(struct brw_context *brw)
{
   /* On platforms with hardware contexts, we can set our initial GPU state
    * right away rather than doing it via state atoms.  This saves a small
    * amount of overhead on every draw call.
    */
   if (!brw->hw_ctx)
      return;

   if (brw->gen == 6)
      intel_emit_post_sync_nonzero_flush(brw);

   brw_upload_invariant_state(brw);

   if (brw->gen >= 8) {
      gen8_emit_3dstate_sample_pattern(brw);
   }
}

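/**
 * Select the per-generation atom list, copy it into the context, sanity
 * check each atom, and mark all state dirty so the first draw re-emits
 * everything.
 */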
void brw_init_state( struct brw_context *brw )
{
   struct gl_context *ctx = &brw->ctx;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   STATIC_ASSERT(ARRAY_SIZE(gen4_atoms) <= ARRAY_SIZE(brw->atoms));
   STATIC_ASSERT(ARRAY_SIZE(gen6_atoms) <= ARRAY_SIZE(brw->atoms));
   STATIC_ASSERT(ARRAY_SIZE(gen7_atoms) <= ARRAY_SIZE(brw->atoms));
   STATIC_ASSERT(ARRAY_SIZE(gen8_atoms) <= ARRAY_SIZE(brw->atoms));

   brw_init_caches(brw);

   if (brw->gen >= 8) {
      atoms = gen8_atoms;
      num_atoms = ARRAY_SIZE(gen8_atoms);
   } else if (brw->gen == 7) {
      atoms = gen7_atoms;
      num_atoms = ARRAY_SIZE(gen7_atoms);
   } else if (brw->gen == 6) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   brw->num_atoms = num_atoms;

   /* This is to work around brw_context::atoms being declared const.  We want
    * it to be const, but it needs to be initialized somehow!
    */
   struct brw_tracked_state *context_atoms =
      (struct brw_tracked_state *) &brw->atoms[0];

   for (int i = 0; i < num_atoms; i++)
      context_atoms[i] = *atoms[i];

   while (num_atoms--) {
      assert((*atoms)->dirty.mesa | (*atoms)->dirty.brw);
      assert((*atoms)->emit);
      atoms++;
   }

   brw_upload_initial_gpu_state(brw);

   brw->state.dirty.mesa = ~0;
   brw->state.dirty.brw = ~0ull;

   /* ~0 is a nonsensical value which won't match anything we program, so
    * the programming will take effect on the first time around.
    */
   brw->pma_stall_bits = ~0;

   /* Make sure that brw->state.dirty.brw has enough bits to hold all possible
    * dirty flags.
    */
   STATIC_ASSERT(BRW_NUM_STATE_BITS <= 8 * sizeof(brw->state.dirty.brw));

   ctx->DriverFlags.NewTransformFeedback = BRW_NEW_TRANSFORM_FEEDBACK;
   ctx->DriverFlags.NewTransformFeedbackProg = BRW_NEW_TRANSFORM_FEEDBACK;
   ctx->DriverFlags.NewRasterizerDiscard = BRW_NEW_RASTERIZER_DISCARD;
   ctx->DriverFlags.NewUniformBuffer = BRW_NEW_UNIFORM_BUFFER;
   ctx->DriverFlags.NewTextureBuffer = BRW_NEW_TEXTURE_BUFFER;
   ctx->DriverFlags.NewAtomicBuffer = BRW_NEW_ATOMIC_BUFFER;
}

void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
}

/***********************************************************************
 */

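/* Return true if any dirty flag in 'a' is also set in 'b'. */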
static bool
check_state(const struct brw_state_flags *a, const struct brw_state_flags *b)
{
   return ((a->mesa & b->mesa) | (a->brw & b->brw)) != 0;
}

static void accumulate_state( struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   a->mesa |= b->mesa;
   a->brw |= b->brw;
}


static void xor_states( struct brw_state_flags *result,
                        const struct brw_state_flags *a,
                        const struct brw_state_flags *b )
{
   result->mesa = a->mesa ^ b->mesa;
   result->brw = a->brw ^ b->brw;
}

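/* Bookkeeping used with INTEL_DEBUG=state to count how often each dirty
 * flag triggers between prints.
 */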
struct dirty_bit_map {
   uint64_t bit;
   const char *name;
   uint32_t count;
};

#define DEFINE_BIT(name) {name, #name, 0}

static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_CURRENT_ATTRIB),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   DEFINE_BIT(_NEW_BUFFER_OBJECT),
   DEFINE_BIT(_NEW_FRAG_CLAMP),
   /* Avoid sign extension problems. */
   {(unsigned) _NEW_VARYING_VP_INPUTS, "_NEW_VARYING_VP_INPUTS", 0},
   {0, 0, 0}
};

static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_FS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_BLORP_BLIT_PROG_DATA),
   DEFINE_BIT(BRW_NEW_SF_PROG_DATA),
   DEFINE_BIT(BRW_NEW_VS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_FF_GS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_GS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_CLIP_PROG_DATA),
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_GEOMETRY_PROGRAM),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_SURFACES),
   DEFINE_BIT(BRW_NEW_VS_BINDING_TABLE),
   DEFINE_BIT(BRW_NEW_GS_BINDING_TABLE),
   DEFINE_BIT(BRW_NEW_PS_BINDING_TABLE),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_GS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_PROGRAM_CACHE),
   DEFINE_BIT(BRW_NEW_STATE_BASE_ADDRESS),
   DEFINE_BIT(BRW_NEW_VUE_MAP_VS),
   DEFINE_BIT(BRW_NEW_VUE_MAP_GEOM_OUT),
   DEFINE_BIT(BRW_NEW_TRANSFORM_FEEDBACK),
   DEFINE_BIT(BRW_NEW_RASTERIZER_DISCARD),
   DEFINE_BIT(BRW_NEW_STATS_WM),
   DEFINE_BIT(BRW_NEW_UNIFORM_BUFFER),
   DEFINE_BIT(BRW_NEW_ATOMIC_BUFFER),
   DEFINE_BIT(BRW_NEW_META_IN_PROGRESS),
   DEFINE_BIT(BRW_NEW_INTERPOLATION_MAP),
   DEFINE_BIT(BRW_NEW_PUSH_CONSTANT_ALLOCATION),
   DEFINE_BIT(BRW_NEW_NUM_SAMPLES),
   DEFINE_BIT(BRW_NEW_TEXTURE_BUFFER),
   DEFINE_BIT(BRW_NEW_GEN4_UNIT_STATE),
   DEFINE_BIT(BRW_NEW_CC_VP),
   DEFINE_BIT(BRW_NEW_SF_VP),
   DEFINE_BIT(BRW_NEW_CLIP_VP),
   DEFINE_BIT(BRW_NEW_SAMPLER_STATE_TABLE),
   DEFINE_BIT(BRW_NEW_VS_ATTRIB_WORKAROUNDS),
   {0, 0, 0}
};

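/* Increment the hit count for every bit in 'bits' that the map tracks. */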
static void
brw_update_dirty_count(struct dirty_bit_map *bit_map, uint64_t bits)
{
   for (int i = 0; bit_map[i].bit != 0; i++) {
      if (bit_map[i].bit & bits)
         bit_map[i].count++;
   }
}

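/* Print the accumulated count for every bit that was hit more than once. */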
static void
brw_print_dirty_count(struct dirty_bit_map *bit_map)
{
   for (int i = 0; bit_map[i].bit != 0; i++) {
      if (bit_map[i].count > 1) {
         /* bit is a uint64_t, so use PRIx64 rather than %lx for portability. */
         fprintf(stderr, "0x%016" PRIx64 ": %12d (%s)\n",
                 bit_map[i].bit, bit_map[i].count, bit_map[i].name);
      }
   }
}

/***********************************************************************
 * Emit all state:
 */
void brw_upload_state(struct brw_context *brw)
{
   struct gl_context *ctx = &brw->ctx;
   struct brw_state_flags *state = &brw->state.dirty;
   int i;
   static int dirty_count = 0;

   state->mesa |= brw->NewGLState;
   brw->NewGLState = 0;

   state->brw |= ctx->NewDriverState;
   ctx->NewDriverState = 0;

   if (0) {
      /* Always re-emit all state. */
      state->mesa |= ~0;
      state->brw |= ~0ull;
   }

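   /* Update the cached program pointers and flag the corresponding dirty
    * bits when the active GL programs, meta state, or sample count change.
    */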
   if (brw->fragment_program != ctx->FragmentProgram._Current) {
      brw->fragment_program = ctx->FragmentProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
   }

   if (brw->geometry_program != ctx->GeometryProgram._Current) {
      brw->geometry_program = ctx->GeometryProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_GEOMETRY_PROGRAM;
   }

   if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
   }

   if (brw->meta_in_progress != _mesa_meta_in_progress(ctx)) {
      brw->meta_in_progress = _mesa_meta_in_progress(ctx);
      brw->state.dirty.brw |= BRW_NEW_META_IN_PROGRESS;
   }

   if (brw->num_samples != ctx->DrawBuffer->Visual.samples) {
      brw->num_samples = ctx->DrawBuffer->Visual.samples;
      brw->state.dirty.brw |= BRW_NEW_NUM_SAMPLES;
   }

   if ((state->mesa | state->brw) == 0)
      return;

   /* Emit Sandybridge workaround flushes on every primitive, for safety. */
   if (brw->gen == 6)
      intel_emit_post_sync_nonzero_flush(brw);

   if (unlikely(INTEL_DEBUG)) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < brw->num_atoms; i++) {
         const struct brw_tracked_state *atom = &brw->atoms[i];
         struct brw_state_flags generated;

         if (check_state(state, &atom->dirty)) {
            atom->emit(brw);
         }

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ state)
          * if (examined & generated)
          *    fail;
          */
         xor_states(&generated, &prev, state);
         assert(!check_state(&examined, &generated));
         prev = *state;
      }
   }
   else {
      for (i = 0; i < brw->num_atoms; i++) {
         const struct brw_tracked_state *atom = &brw->atoms[i];

         if (check_state(state, &atom->dirty)) {
            atom->emit(brw);
         }
      }
   }

   if (unlikely(INTEL_DEBUG & DEBUG_STATE)) {
      STATIC_ASSERT(ARRAY_SIZE(brw_bits) == BRW_NUM_STATE_BITS + 1);

      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      if (dirty_count++ % 1000 == 0) {
         brw_print_dirty_count(mesa_bits);
         brw_print_dirty_count(brw_bits);
         fprintf(stderr, "\n");
      }
   }
}


/**
 * Clear dirty bits to account for the fact that the state emitted by
 * brw_upload_state() has been committed to the hardware.  This is a separate
 * call from brw_upload_state() because it's possible that after the call to
 * brw_upload_state(), we will discover that we've run out of aperture space,
 * and need to rewind the batch buffer to the state it had before the
 * brw_upload_state() call.
 */
void
brw_clear_dirty_bits(struct brw_context *brw)
{
   struct brw_state_flags *state = &brw->state.dirty;
   memset(state, 0, sizeof(*state));
}