i965/gen8+: Fix the number of dwords programmed in MI_FLUSH_DW
mesa.git: src/mesa/drivers/dri/i965/brw_state_upload.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_context.h"
#include "brw_defines.h"
#include "brw_state.h"
#include "brw_program.h"
#include "drivers/common/meta.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "brw_vs.h"
#include "brw_ff_gs.h"
#include "brw_gs.h"
#include "brw_wm.h"
#include "brw_cs.h"
#include "main/framebuffer.h"

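/**
 * Program GPU state that is set once at context creation and then left
 * alone for the life of the context.
 *
 * This only happens on platforms with hardware contexts (brw->hw_ctx);
 * without one, the equivalent state is re-emitted via the state atoms.
 */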
static void
brw_upload_initial_gpu_state(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* On platforms with hardware contexts, we can set our initial GPU state
    * right away rather than doing it via state atoms.  This saves a small
    * amount of overhead on every draw call.
    */
   if (!brw->hw_ctx)
      return;

   if (devinfo->gen == 6)
      brw_emit_post_sync_nonzero_flush(brw);

   brw_upload_invariant_state(brw);

   if (devinfo->gen == 10) {
      brw_load_register_imm32(brw, GEN10_CACHE_MODE_SS,
                              REG_MASK(GEN10_FLOAT_BLEND_OPTIMIZATION_ENABLE) |
                              GEN10_FLOAT_BLEND_OPTIMIZATION_ENABLE);

      /* From gen10 workaround table in h/w specs:
       *
       *    "On 3DSTATE_3D_MODE, driver must always program bits 31:16 of DW1
       *     a value of 0xFFFF"
       *
       * This means that we end up setting the entire 3D_MODE state.  Bits
       * in this register control things such as slice hashing and we want
       * the default values of zero at the moment.
       */
      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_3D_MODE << 16 | (2 - 2));
      OUT_BATCH(0xFFFF << 16);
      ADVANCE_BATCH();
   }

   if (devinfo->gen == 9) {
      /* Recommended optimizations for Victim Cache eviction and floating
       * point blending.
       */
      BEGIN_BATCH(3);
      OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
      OUT_BATCH(GEN7_CACHE_MODE_1);
      OUT_BATCH(REG_MASK(GEN9_FLOAT_BLEND_OPTIMIZATION_ENABLE) |
                REG_MASK(GEN9_PARTIAL_RESOLVE_DISABLE_IN_VC) |
                GEN9_FLOAT_BLEND_OPTIMIZATION_ENABLE |
                GEN9_PARTIAL_RESOLVE_DISABLE_IN_VC);
      ADVANCE_BATCH();

      if (gen_device_info_is_9lp(devinfo)) {
         BEGIN_BATCH(3);
         OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
         OUT_BATCH(GEN7_GT_MODE);
         OUT_BATCH(GEN9_SUBSLICE_HASHING_MASK_BITS |
                   GEN9_SUBSLICE_HASHING_16x16);
         ADVANCE_BATCH();
      }
   }

   if (devinfo->gen >= 8) {
      gen8_emit_3dstate_sample_pattern(brw);

      BEGIN_BATCH(5);
      OUT_BATCH(_3DSTATE_WM_HZ_OP << 16 | (5 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();

      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_WM_CHROMAKEY << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}

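/**
 * Return the state-atom list for the given pipeline (render or compute).
 */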
static inline const struct brw_tracked_state *
brw_get_pipeline_atoms(struct brw_context *brw,
                       enum brw_pipeline pipeline)
{
   switch (pipeline) {
   case BRW_RENDER_PIPELINE:
      return brw->render_atoms;
   case BRW_COMPUTE_PIPELINE:
      return brw->compute_atoms;
   default:
      STATIC_ASSERT(BRW_NUM_PIPELINES == 2);
      unreachable("Unsupported pipeline");
      return NULL;
   }
}

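/**
 * Copy the given atoms into the context's per-pipeline table and record
 * how many atoms that pipeline has.
 */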
void
brw_copy_pipeline_atoms(struct brw_context *brw,
                        enum brw_pipeline pipeline,
                        const struct brw_tracked_state **atoms,
                        int num_atoms)
{
   /* This is to work around brw_context::atoms being declared const.  We want
    * it to be const, but it needs to be initialized somehow!
    */
   struct brw_tracked_state *context_atoms =
      (struct brw_tracked_state *) brw_get_pipeline_atoms(brw, pipeline);

   for (int i = 0; i < num_atoms; i++) {
      context_atoms[i] = *atoms[i];
      assert(context_atoms[i].dirty.mesa | context_atoms[i].dirty.brw);
      assert(context_atoms[i].emit);
   }

   brw->num_atoms[pipeline] = num_atoms;
}

void brw_init_state( struct brw_context *brw )
{
   struct gl_context *ctx = &brw->ctx;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* Force the first brw_select_pipeline to emit pipeline select */
   brw->last_pipeline = BRW_NUM_PIPELINES;

   brw_init_caches(brw);

   if (devinfo->gen >= 10)
      gen10_init_atoms(brw);
   else if (devinfo->gen >= 9)
      gen9_init_atoms(brw);
   else if (devinfo->gen >= 8)
      gen8_init_atoms(brw);
   else if (devinfo->is_haswell)
      gen75_init_atoms(brw);
   else if (devinfo->gen >= 7)
      gen7_init_atoms(brw);
   else if (devinfo->gen >= 6)
      gen6_init_atoms(brw);
   else if (devinfo->gen >= 5)
      gen5_init_atoms(brw);
   else if (devinfo->is_g4x)
      gen45_init_atoms(brw);
   else
      gen4_init_atoms(brw);

   brw_upload_initial_gpu_state(brw);

   brw->NewGLState = ~0;
   brw->ctx.NewDriverState = ~0ull;

   /* ~0 is a nonsensical value which won't match anything we program, so
    * the programming will take effect on the first time around.
    */
   brw->pma_stall_bits = ~0;

   /* Make sure that brw->ctx.NewDriverState has enough bits to hold all
    * possible dirty flags.
    */
   STATIC_ASSERT(BRW_NUM_STATE_BITS <= 8 * sizeof(brw->ctx.NewDriverState));

   ctx->DriverFlags.NewTransformFeedback = BRW_NEW_TRANSFORM_FEEDBACK;
   ctx->DriverFlags.NewTransformFeedbackProg = BRW_NEW_TRANSFORM_FEEDBACK;
   ctx->DriverFlags.NewRasterizerDiscard = BRW_NEW_RASTERIZER_DISCARD;
   ctx->DriverFlags.NewUniformBuffer = BRW_NEW_UNIFORM_BUFFER;
   ctx->DriverFlags.NewShaderStorageBuffer = BRW_NEW_UNIFORM_BUFFER;
   ctx->DriverFlags.NewTextureBuffer = BRW_NEW_TEXTURE_BUFFER;
   ctx->DriverFlags.NewAtomicBuffer = BRW_NEW_ATOMIC_BUFFER;
   ctx->DriverFlags.NewImageUnits = BRW_NEW_IMAGE_UNITS;
   ctx->DriverFlags.NewDefaultTessLevels = BRW_NEW_DEFAULT_TESS_LEVELS;
   ctx->DriverFlags.NewIntelConservativeRasterization = BRW_NEW_CONSERVATIVE_RASTERIZATION;
}


void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
}

/***********************************************************************
 */

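/* Return true if the two dirty-flag sets intersect. */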
static bool
check_state(const struct brw_state_flags *a, const struct brw_state_flags *b)
{
   return ((a->mesa & b->mesa) | (a->brw & b->brw)) != 0;
}

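/* OR the dirty flags of b into a. */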
static void accumulate_state( struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   a->mesa |= b->mesa;
   a->brw |= b->brw;
}


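/* Compute the dirty flags that differ between a and b. */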
static void xor_states( struct brw_state_flags *result,
                        const struct brw_state_flags *a,
                        const struct brw_state_flags *b )
{
   result->mesa = a->mesa ^ b->mesa;
   result->brw = a->brw ^ b->brw;
}

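/* Bookkeeping for the DEBUG_STATE statistics below: one entry per dirty
 * bit, counting how many times that bit triggered state emission.
 */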
struct dirty_bit_map {
   uint64_t bit;
   char *name;
   uint32_t count;
};

#define DEFINE_BIT(name) {name, #name, 0}
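/* For example, DEFINE_BIT(_NEW_COLOR) expands to {_NEW_COLOR, "_NEW_COLOR", 0}. */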

static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE_OBJECT),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_TEXTURE_STATE),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_CURRENT_ATTRIB),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   DEFINE_BIT(_NEW_FRAG_CLAMP),
   /* Avoid sign extension problems. */
   {(unsigned) _NEW_VARYING_VP_INPUTS, "_NEW_VARYING_VP_INPUTS", 0},
   {0, 0, 0}
};

static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_FS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_BLORP_BLIT_PROG_DATA),
   DEFINE_BIT(BRW_NEW_SF_PROG_DATA),
   DEFINE_BIT(BRW_NEW_VS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_FF_GS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_GS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_TCS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_TES_PROG_DATA),
   DEFINE_BIT(BRW_NEW_CLIP_PROG_DATA),
   DEFINE_BIT(BRW_NEW_CS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_GEOMETRY_PROGRAM),
   DEFINE_BIT(BRW_NEW_TESS_PROGRAMS),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PATCH_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_SURFACES),
   DEFINE_BIT(BRW_NEW_BINDING_TABLE_POINTERS),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_DEFAULT_TESS_LEVELS),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_TCS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_TES_CONSTBUF),
   DEFINE_BIT(BRW_NEW_GS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_PROGRAM_CACHE),
   DEFINE_BIT(BRW_NEW_STATE_BASE_ADDRESS),
   DEFINE_BIT(BRW_NEW_VUE_MAP_GEOM_OUT),
   DEFINE_BIT(BRW_NEW_TRANSFORM_FEEDBACK),
   DEFINE_BIT(BRW_NEW_RASTERIZER_DISCARD),
   DEFINE_BIT(BRW_NEW_STATS_WM),
   DEFINE_BIT(BRW_NEW_UNIFORM_BUFFER),
   DEFINE_BIT(BRW_NEW_ATOMIC_BUFFER),
   DEFINE_BIT(BRW_NEW_IMAGE_UNITS),
   DEFINE_BIT(BRW_NEW_META_IN_PROGRESS),
   DEFINE_BIT(BRW_NEW_PUSH_CONSTANT_ALLOCATION),
   DEFINE_BIT(BRW_NEW_NUM_SAMPLES),
   DEFINE_BIT(BRW_NEW_TEXTURE_BUFFER),
   DEFINE_BIT(BRW_NEW_GEN4_UNIT_STATE),
   DEFINE_BIT(BRW_NEW_CC_VP),
   DEFINE_BIT(BRW_NEW_SF_VP),
   DEFINE_BIT(BRW_NEW_CLIP_VP),
   DEFINE_BIT(BRW_NEW_SAMPLER_STATE_TABLE),
   DEFINE_BIT(BRW_NEW_VS_ATTRIB_WORKAROUNDS),
   DEFINE_BIT(BRW_NEW_COMPUTE_PROGRAM),
   DEFINE_BIT(BRW_NEW_CS_WORK_GROUPS),
   DEFINE_BIT(BRW_NEW_URB_SIZE),
   DEFINE_BIT(BRW_NEW_CC_STATE),
   DEFINE_BIT(BRW_NEW_BLORP),
   DEFINE_BIT(BRW_NEW_VIEWPORT_COUNT),
   DEFINE_BIT(BRW_NEW_CONSERVATIVE_RASTERIZATION),
   DEFINE_BIT(BRW_NEW_DRAW_CALL),
   DEFINE_BIT(BRW_NEW_AUX_STATE),
   {0, 0, 0}
};

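/* Bump the counter for each map entry whose bit is set in bits. */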
static void
brw_update_dirty_count(struct dirty_bit_map *bit_map, uint64_t bits)
{
   for (int i = 0; bit_map[i].bit != 0; i++) {
      if (bit_map[i].bit & bits)
         bit_map[i].count++;
   }
}

static void
brw_print_dirty_count(struct dirty_bit_map *bit_map)
{
   for (int i = 0; bit_map[i].bit != 0; i++) {
      if (bit_map[i].count > 1) {
         fprintf(stderr, "0x%016"PRIx64": %12d (%s)\n",
                 bit_map[i].bit, bit_map[i].count, bit_map[i].name);
      }
   }
}

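/* The TCS and TES are uploaded as a pair, keyed on whether a tessellation
 * evaluation shader is bound; GL has no use for a TCS without a TES.
 */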
static inline void
brw_upload_tess_programs(struct brw_context *brw)
{
   if (brw->programs[MESA_SHADER_TESS_EVAL]) {
      brw_upload_tcs_prog(brw);
      brw_upload_tes_prog(brw);
   } else {
      brw->tcs.base.prog_data = NULL;
      brw->tes.base.prog_data = NULL;
   }
}

static inline void
brw_upload_programs(struct brw_context *brw,
                    enum brw_pipeline pipeline)
{
   struct gl_context *ctx = &brw->ctx;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (pipeline == BRW_RENDER_PIPELINE) {
      brw_upload_vs_prog(brw);
      brw_upload_tess_programs(brw);

      if (brw->programs[MESA_SHADER_GEOMETRY]) {
         brw_upload_gs_prog(brw);
      } else {
         brw->gs.base.prog_data = NULL;
         if (devinfo->gen < 7)
            brw_upload_ff_gs_prog(brw);
      }

      /* Update the VUE map for data exiting the GS stage of the pipeline.
       * This comes from the last enabled shader stage.
       */
      GLbitfield64 old_slots = brw->vue_map_geom_out.slots_valid;
      bool old_separate = brw->vue_map_geom_out.separate;
      struct brw_vue_prog_data *vue_prog_data;
      if (brw->programs[MESA_SHADER_GEOMETRY])
         vue_prog_data = brw_vue_prog_data(brw->gs.base.prog_data);
      else if (brw->programs[MESA_SHADER_TESS_EVAL])
         vue_prog_data = brw_vue_prog_data(brw->tes.base.prog_data);
      else
         vue_prog_data = brw_vue_prog_data(brw->vs.base.prog_data);

      brw->vue_map_geom_out = vue_prog_data->vue_map;

      /* If the layout has changed, signal BRW_NEW_VUE_MAP_GEOM_OUT. */
      if (old_slots != brw->vue_map_geom_out.slots_valid ||
          old_separate != brw->vue_map_geom_out.separate)
         brw->ctx.NewDriverState |= BRW_NEW_VUE_MAP_GEOM_OUT;

      if ((old_slots ^ brw->vue_map_geom_out.slots_valid) &
          VARYING_BIT_VIEWPORT) {
         ctx->NewDriverState |= BRW_NEW_VIEWPORT_COUNT;
         brw->clip.viewport_count =
            (brw->vue_map_geom_out.slots_valid & VARYING_BIT_VIEWPORT) ?
            ctx->Const.MaxViewports : 1;
      }

      brw_upload_wm_prog(brw);

      if (devinfo->gen < 6) {
         brw_upload_clip_prog(brw);
         brw_upload_sf_prog(brw);
      }

      brw_disk_cache_write_render_programs(brw);
   } else if (pipeline == BRW_COMPUTE_PIPELINE) {
      brw_upload_cs_prog(brw);
      brw_disk_cache_write_compute_program(brw);
   }
}

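/* Fold the context-wide dirty flags into the pipeline-local accumulator. */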
static inline void
merge_ctx_state(struct brw_context *brw,
                struct brw_state_flags *state)
{
   state->mesa |= brw->NewGLState;
   state->brw |= brw->ctx.NewDriverState;
}

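/* Emit a state atom if any of the dirty bits it subscribes to are set, and
 * pick up any new dirty flags its emit function may have raised.
 */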
static ALWAYS_INLINE void
check_and_emit_atom(struct brw_context *brw,
                    struct brw_state_flags *state,
                    const struct brw_tracked_state *atom)
{
   if (check_state(state, &atom->dirty)) {
      atom->emit(brw);
      merge_ctx_state(brw, state);
   }
}

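/**
 * Upload all dirty state for the given pipeline: select the pipeline,
 * refresh the bound program pointers, then walk that pipeline's state
 * atoms and emit the ones whose dirty bits are set.
 */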
static inline void
brw_upload_pipeline_state(struct brw_context *brw,
                          enum brw_pipeline pipeline)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   int i;
   static int dirty_count = 0;
   struct brw_state_flags state = brw->state.pipelines[pipeline];
   const unsigned fb_samples =
      MAX2(_mesa_geometric_samples(ctx->DrawBuffer), 1);

   brw_select_pipeline(brw, pipeline);

   if (unlikely(INTEL_DEBUG & DEBUG_REEMIT)) {
      /* Always re-emit all state. */
      brw->NewGLState = ~0;
      ctx->NewDriverState = ~0ull;
   }

   if (pipeline == BRW_RENDER_PIPELINE) {
      if (brw->programs[MESA_SHADER_FRAGMENT] !=
          ctx->FragmentProgram._Current) {
         brw->programs[MESA_SHADER_FRAGMENT] = ctx->FragmentProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_FRAGMENT_PROGRAM;
      }

      if (brw->programs[MESA_SHADER_TESS_EVAL] !=
          ctx->TessEvalProgram._Current) {
         brw->programs[MESA_SHADER_TESS_EVAL] = ctx->TessEvalProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_TESS_PROGRAMS;
      }

      if (brw->programs[MESA_SHADER_TESS_CTRL] !=
          ctx->TessCtrlProgram._Current) {
         brw->programs[MESA_SHADER_TESS_CTRL] = ctx->TessCtrlProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_TESS_PROGRAMS;
      }

      if (brw->programs[MESA_SHADER_GEOMETRY] !=
          ctx->GeometryProgram._Current) {
         brw->programs[MESA_SHADER_GEOMETRY] = ctx->GeometryProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_GEOMETRY_PROGRAM;
      }

      if (brw->programs[MESA_SHADER_VERTEX] != ctx->VertexProgram._Current) {
         brw->programs[MESA_SHADER_VERTEX] = ctx->VertexProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_VERTEX_PROGRAM;
      }
   }

   if (brw->programs[MESA_SHADER_COMPUTE] != ctx->ComputeProgram._Current) {
      brw->programs[MESA_SHADER_COMPUTE] = ctx->ComputeProgram._Current;
      brw->ctx.NewDriverState |= BRW_NEW_COMPUTE_PROGRAM;
   }

   if (brw->meta_in_progress != _mesa_meta_in_progress(ctx)) {
      brw->meta_in_progress = _mesa_meta_in_progress(ctx);
      brw->ctx.NewDriverState |= BRW_NEW_META_IN_PROGRESS;
   }

   if (brw->num_samples != fb_samples) {
      brw->num_samples = fb_samples;
      brw->ctx.NewDriverState |= BRW_NEW_NUM_SAMPLES;
   }

   /* Exit early if no state is flagged as dirty */
   merge_ctx_state(brw, &state);
   if ((state.mesa | state.brw) == 0)
      return;

   /* Emit Sandybridge workaround flushes on every primitive, for safety. */
   if (devinfo->gen == 6)
      brw_emit_post_sync_nonzero_flush(brw);

   brw_upload_programs(brw, pipeline);
   merge_ctx_state(brw, &state);

   brw_upload_state_base_address(brw);

   const struct brw_tracked_state *atoms =
      brw_get_pipeline_atoms(brw, pipeline);
   const int num_atoms = brw->num_atoms[pipeline];

   if (unlikely(INTEL_DEBUG)) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = state;

      for (i = 0; i < num_atoms; i++) {
         const struct brw_tracked_state *atom = &atoms[i];
         struct brw_state_flags generated;

         check_and_emit_atom(brw, &state, atom);

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ state)
          * if (examined & generated)
          *    fail;
          */
         xor_states(&generated, &prev, &state);
         assert(!check_state(&examined, &generated));
         prev = state;
      }
   }
   else {
      for (i = 0; i < num_atoms; i++) {
         const struct brw_tracked_state *atom = &atoms[i];

         check_and_emit_atom(brw, &state, atom);
      }
   }

   if (unlikely(INTEL_DEBUG & DEBUG_STATE)) {
      STATIC_ASSERT(ARRAY_SIZE(brw_bits) == BRW_NUM_STATE_BITS + 1);

      brw_update_dirty_count(mesa_bits, state.mesa);
      brw_update_dirty_count(brw_bits, state.brw);
      if (dirty_count++ % 1000 == 0) {
         brw_print_dirty_count(mesa_bits);
         brw_print_dirty_count(brw_bits);
         fprintf(stderr, "\n");
      }
   }
}

/***********************************************************************
 * Emit all state:
 */
void brw_upload_render_state(struct brw_context *brw)
{
   brw_upload_pipeline_state(brw, BRW_RENDER_PIPELINE);
}

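/* Transfer this pipeline's now-emitted dirty flags to the other pipelines
 * (which still need to process them) and clear the flags for this one.
 */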
static inline void
brw_pipeline_state_finished(struct brw_context *brw,
                            enum brw_pipeline pipeline)
{
   /* Save all dirty state into the other pipelines */
   for (unsigned i = 0; i < BRW_NUM_PIPELINES; i++) {
      if (i != pipeline) {
         brw->state.pipelines[i].mesa |= brw->NewGLState;
         brw->state.pipelines[i].brw |= brw->ctx.NewDriverState;
      } else {
         memset(&brw->state.pipelines[i], 0, sizeof(struct brw_state_flags));
      }
   }

   brw->NewGLState = 0;
   brw->ctx.NewDriverState = 0ull;
}

/**
 * Clear dirty bits to account for the fact that the state emitted by
 * brw_upload_render_state() has been committed to the hardware.  This is a
 * separate call from brw_upload_render_state() because it's possible that
 * after the call to brw_upload_render_state(), we will discover that we've
 * run out of aperture space, and need to rewind the batch buffer to the
 * state it had before the brw_upload_render_state() call.
 */
void
brw_render_state_finished(struct brw_context *brw)
{
   brw_pipeline_state_finished(brw, BRW_RENDER_PIPELINE);
}

void
brw_upload_compute_state(struct brw_context *brw)
{
   brw_upload_pipeline_state(brw, BRW_COMPUTE_PIPELINE);
}

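/**
 * Clear dirty bits for state emitted by brw_upload_compute_state(); the
 * compute analogue of brw_render_state_finished().
 */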
void
brw_compute_state_finished(struct brw_context *brw)
{
   brw_pipeline_state_finished(brw, BRW_COMPUTE_PIPELINE);
}