i965: Get real per-gen atom lists
mesa.git: src/mesa/drivers/dri/i965/brw_state_upload.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_context.h"
#include "brw_defines.h"
#include "brw_state.h"
#include "brw_program.h"
#include "drivers/common/meta.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "brw_vs.h"
#include "brw_ff_gs.h"
#include "brw_gs.h"
#include "brw_wm.h"
#include "brw_cs.h"
#include "main/framebuffer.h"

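/* Program GPU state that stays invariant for the life of the context.
 * This presumably relies on the kernel's hardware context support
 * preserving register state across batches; without a hardware context,
 * the equivalent state has to be re-emitted through the atom lists.
 */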
static void
brw_upload_initial_gpu_state(struct brw_context *brw)
{
   /* On platforms with hardware contexts, we can set our initial GPU state
    * right away rather than doing it via state atoms.  This saves a small
    * amount of overhead on every draw call.
    */
   if (!brw->hw_ctx)
      return;

   if (brw->gen == 6)
      brw_emit_post_sync_nonzero_flush(brw);

   brw_upload_invariant_state(brw);

   /* Recommended optimization for Victim Cache eviction in pixel backend. */
   if (brw->gen >= 9) {
      BEGIN_BATCH(3);
      OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
      OUT_BATCH(GEN7_CACHE_MODE_1);
      OUT_BATCH(REG_MASK(GEN9_PARTIAL_RESOLVE_DISABLE_IN_VC) |
                GEN9_PARTIAL_RESOLVE_DISABLE_IN_VC);
      ADVANCE_BATCH();
   }

   if (brw->gen >= 8) {
      gen8_emit_3dstate_sample_pattern(brw);

      BEGIN_BATCH(5);
      OUT_BATCH(_3DSTATE_WM_HZ_OP << 16 | (5 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();

      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_WM_CHROMAKEY << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}

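/* Return the atom list for the given pipeline (render or compute).  The
 * lists themselves are filled in per-gen by the genX_init_atoms() calls
 * in brw_init_state().
 */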
static inline const struct brw_tracked_state *
brw_get_pipeline_atoms(struct brw_context *brw,
                       enum brw_pipeline pipeline)
{
   switch (pipeline) {
   case BRW_RENDER_PIPELINE:
      return brw->render_atoms;
   case BRW_COMPUTE_PIPELINE:
      return brw->compute_atoms;
   default:
      STATIC_ASSERT(BRW_NUM_PIPELINES == 2);
      unreachable("Unsupported pipeline");
      return NULL;
   }
}

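/* Copy a per-gen atom list into the context's (nominally const) storage,
 * sanity-checking each entry along the way.
 */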
void
brw_copy_pipeline_atoms(struct brw_context *brw,
                        enum brw_pipeline pipeline,
                        const struct brw_tracked_state **atoms,
                        int num_atoms)
{
   /* This is to work around brw_context::atoms being declared const.  We
    * want it to be const, but it needs to be initialized somehow!
    */
   struct brw_tracked_state *context_atoms =
      (struct brw_tracked_state *) brw_get_pipeline_atoms(brw, pipeline);

   for (int i = 0; i < num_atoms; i++) {
      context_atoms[i] = *atoms[i];
      assert(context_atoms[i].dirty.mesa | context_atoms[i].dirty.brw);
      assert(context_atoms[i].emit);
   }

   brw->num_atoms[pipeline] = num_atoms;
}

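/* One-time state tracker setup: select the per-gen atom lists, initialize
 * the program cache, upload the invariant GPU state, and flag everything
 * dirty so the first draw emits the full pipeline state.
 */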
void brw_init_state( struct brw_context *brw )
{
   struct gl_context *ctx = &brw->ctx;

   /* Force the first brw_select_pipeline to emit pipeline select */
   brw->last_pipeline = BRW_NUM_PIPELINES;

   brw_init_caches(brw);

   if (brw->gen >= 9)
      gen9_init_atoms(brw);
   else if (brw->gen >= 8)
      gen8_init_atoms(brw);
   else if (brw->is_haswell)
      gen75_init_atoms(brw);
   else if (brw->gen >= 7)
      gen7_init_atoms(brw);
   else if (brw->gen >= 6)
      gen6_init_atoms(brw);
   else if (brw->gen >= 5)
      gen5_init_atoms(brw);
   else if (brw->is_g4x)
      gen45_init_atoms(brw);
   else
      gen4_init_atoms(brw);

   brw_upload_initial_gpu_state(brw);

   brw->NewGLState = ~0;
   brw->ctx.NewDriverState = ~0ull;

   /* ~0 is a nonsensical value which won't match anything we program, so
    * the programming will take effect on the first time around.
    */
   brw->pma_stall_bits = ~0;

   /* Make sure that brw->ctx.NewDriverState has enough bits to hold all
    * possible dirty flags.
    */
   STATIC_ASSERT(BRW_NUM_STATE_BITS <= 8 * sizeof(brw->ctx.NewDriverState));

   ctx->DriverFlags.NewTransformFeedback = BRW_NEW_TRANSFORM_FEEDBACK;
   ctx->DriverFlags.NewTransformFeedbackProg = BRW_NEW_TRANSFORM_FEEDBACK;
   ctx->DriverFlags.NewRasterizerDiscard = BRW_NEW_RASTERIZER_DISCARD;
   ctx->DriverFlags.NewUniformBuffer = BRW_NEW_UNIFORM_BUFFER;
   ctx->DriverFlags.NewShaderStorageBuffer = BRW_NEW_UNIFORM_BUFFER;
   ctx->DriverFlags.NewTextureBuffer = BRW_NEW_TEXTURE_BUFFER;
   ctx->DriverFlags.NewAtomicBuffer = BRW_NEW_ATOMIC_BUFFER;
   ctx->DriverFlags.NewImageUnits = BRW_NEW_IMAGE_UNITS;
   ctx->DriverFlags.NewDefaultTessLevels = BRW_NEW_DEFAULT_TESS_LEVELS;
   ctx->DriverFlags.NewIntelConservativeRasterization =
      BRW_NEW_CONSERVATIVE_RASTERIZATION;
}


void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
}

/***********************************************************************
 */

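/* Return true if the two dirty-flag sets intersect, i.e. whether an atom
 * with dirty flags 'b' needs to be emitted given accumulated state 'a'.
 */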
static bool
check_state(const struct brw_state_flags *a, const struct brw_state_flags *b)
{
   return ((a->mesa & b->mesa) | (a->brw & b->brw)) != 0;
}

static void accumulate_state( struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   a->mesa |= b->mesa;
   a->brw |= b->brw;
}


static void xor_states( struct brw_state_flags *result,
                        const struct brw_state_flags *a,
                        const struct brw_state_flags *b )
{
   result->mesa = a->mesa ^ b->mesa;
   result->brw = a->brw ^ b->brw;
}

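/* Bookkeeping for the DEBUG_STATE statistics below: how many times each
 * dirty bit has been seen set at state-upload time.
 */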
struct dirty_bit_map {
   uint64_t bit;
   char *name;
   uint32_t count;
};

#define DEFINE_BIT(name) {name, #name, 0}

static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE_OBJECT),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_TEXTURE_STATE),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_CURRENT_ATTRIB),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   DEFINE_BIT(_NEW_BUFFER_OBJECT),
   DEFINE_BIT(_NEW_FRAG_CLAMP),
   /* Avoid sign extension problems. */
   {(unsigned) _NEW_VARYING_VP_INPUTS, "_NEW_VARYING_VP_INPUTS", 0},
   {0, 0, 0}
};

static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_FS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_BLORP_BLIT_PROG_DATA),
   DEFINE_BIT(BRW_NEW_SF_PROG_DATA),
   DEFINE_BIT(BRW_NEW_VS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_FF_GS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_GS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_TCS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_TES_PROG_DATA),
   DEFINE_BIT(BRW_NEW_CLIP_PROG_DATA),
   DEFINE_BIT(BRW_NEW_CS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_GEOMETRY_PROGRAM),
   DEFINE_BIT(BRW_NEW_TESS_PROGRAMS),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PATCH_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_SURFACES),
   DEFINE_BIT(BRW_NEW_BINDING_TABLE_POINTERS),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_DEFAULT_TESS_LEVELS),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_TCS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_TES_CONSTBUF),
   DEFINE_BIT(BRW_NEW_GS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_PROGRAM_CACHE),
   DEFINE_BIT(BRW_NEW_STATE_BASE_ADDRESS),
   DEFINE_BIT(BRW_NEW_VUE_MAP_GEOM_OUT),
   DEFINE_BIT(BRW_NEW_TRANSFORM_FEEDBACK),
   DEFINE_BIT(BRW_NEW_RASTERIZER_DISCARD),
   DEFINE_BIT(BRW_NEW_STATS_WM),
   DEFINE_BIT(BRW_NEW_UNIFORM_BUFFER),
   DEFINE_BIT(BRW_NEW_ATOMIC_BUFFER),
   DEFINE_BIT(BRW_NEW_IMAGE_UNITS),
   DEFINE_BIT(BRW_NEW_META_IN_PROGRESS),
   DEFINE_BIT(BRW_NEW_PUSH_CONSTANT_ALLOCATION),
   DEFINE_BIT(BRW_NEW_NUM_SAMPLES),
   DEFINE_BIT(BRW_NEW_TEXTURE_BUFFER),
   DEFINE_BIT(BRW_NEW_GEN4_UNIT_STATE),
   DEFINE_BIT(BRW_NEW_CC_VP),
   DEFINE_BIT(BRW_NEW_SF_VP),
   DEFINE_BIT(BRW_NEW_CLIP_VP),
   DEFINE_BIT(BRW_NEW_SAMPLER_STATE_TABLE),
   DEFINE_BIT(BRW_NEW_VS_ATTRIB_WORKAROUNDS),
   DEFINE_BIT(BRW_NEW_COMPUTE_PROGRAM),
   DEFINE_BIT(BRW_NEW_CS_WORK_GROUPS),
   DEFINE_BIT(BRW_NEW_URB_SIZE),
   DEFINE_BIT(BRW_NEW_CC_STATE),
   DEFINE_BIT(BRW_NEW_BLORP),
   DEFINE_BIT(BRW_NEW_VIEWPORT_COUNT),
   DEFINE_BIT(BRW_NEW_CONSERVATIVE_RASTERIZATION),
   {0, 0, 0}
};

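/* Tally and periodically report which dirty bits actually fire; used for
 * the DEBUG_STATE statistics gathered in brw_upload_pipeline_state().
 */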
static void
brw_update_dirty_count(struct dirty_bit_map *bit_map, uint64_t bits)
{
   for (int i = 0; bit_map[i].bit != 0; i++) {
      if (bit_map[i].bit & bits)
         bit_map[i].count++;
   }
}

static void
brw_print_dirty_count(struct dirty_bit_map *bit_map)
{
   for (int i = 0; bit_map[i].bit != 0; i++) {
      if (bit_map[i].count > 1) {
         fprintf(stderr, "0x%016"PRIx64": %12d (%s)\n",
                 bit_map[i].bit, bit_map[i].count, bit_map[i].name);
      }
   }
}

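/* Tessellation is keyed off the evaluation shader: if no TES is bound,
 * skip both tessellation stages and clear their prog_data.
 */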
static inline void
brw_upload_tess_programs(struct brw_context *brw)
{
   if (brw->tess_eval_program) {
      brw_upload_tcs_prog(brw);
      brw_upload_tes_prog(brw);
   } else {
      brw->tcs.base.prog_data = NULL;
      brw->tes.base.prog_data = NULL;
   }
}

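/* Upload the shader programs for the given pipeline's enabled stages, and
 * update derived state (the geometry-out VUE map and the viewport count)
 * that depends on which stages are enabled.
 */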
static inline void
brw_upload_programs(struct brw_context *brw,
                    enum brw_pipeline pipeline)
{
   struct gl_context *ctx = &brw->ctx;

   if (pipeline == BRW_RENDER_PIPELINE) {
      brw_upload_vs_prog(brw);
      brw_upload_tess_programs(brw);

      if (brw->gen < 6)
         brw_upload_ff_gs_prog(brw);
      else
         brw_upload_gs_prog(brw);

      /* Update the VUE map for data exiting the GS stage of the pipeline.
       * This comes from the last enabled shader stage.
       */
      GLbitfield64 old_slots = brw->vue_map_geom_out.slots_valid;
      bool old_separate = brw->vue_map_geom_out.separate;
      struct brw_vue_prog_data *vue_prog_data;
      if (brw->geometry_program)
         vue_prog_data = brw_vue_prog_data(brw->gs.base.prog_data);
      else if (brw->tess_eval_program)
         vue_prog_data = brw_vue_prog_data(brw->tes.base.prog_data);
      else
         vue_prog_data = brw_vue_prog_data(brw->vs.base.prog_data);

      brw->vue_map_geom_out = vue_prog_data->vue_map;

      /* If the layout has changed, signal BRW_NEW_VUE_MAP_GEOM_OUT. */
      if (old_slots != brw->vue_map_geom_out.slots_valid ||
          old_separate != brw->vue_map_geom_out.separate)
         brw->ctx.NewDriverState |= BRW_NEW_VUE_MAP_GEOM_OUT;

      if ((old_slots ^ brw->vue_map_geom_out.slots_valid) &
          VARYING_BIT_VIEWPORT) {
         ctx->NewDriverState |= BRW_NEW_VIEWPORT_COUNT;
         brw->clip.viewport_count =
            (brw->vue_map_geom_out.slots_valid & VARYING_BIT_VIEWPORT) ?
            ctx->Const.MaxViewports : 1;
      }

      brw_upload_wm_prog(brw);

      if (brw->gen < 6) {
         brw_upload_clip_prog(brw);
         brw_upload_sf_prog(brw);
      }
   } else if (pipeline == BRW_COMPUTE_PIPELINE) {
      brw_upload_cs_prog(brw);
   }
}

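/* Fold any newly flagged context state into the pipeline's accumulated
 * dirty flags.
 */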
static inline void
merge_ctx_state(struct brw_context *brw,
                struct brw_state_flags *state)
{
   state->mesa |= brw->NewGLState;
   state->brw |= brw->ctx.NewDriverState;
}

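/* Emit an atom if it listens to any of the currently dirty flags.  Atoms
 * may flag more state dirty as they emit, so re-merge afterwards.
 */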
static inline void
check_and_emit_atom(struct brw_context *brw,
                    struct brw_state_flags *state,
                    const struct brw_tracked_state *atom)
{
   if (check_state(state, &atom->dirty)) {
      atom->emit(brw);
      merge_ctx_state(brw, state);
   }
}

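/* The core of state upload: note which programs changed, upload them, then
 * walk the pipeline's atom list and emit every atom whose dirty flags
 * intersect the accumulated dirty state.
 */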
static inline void
brw_upload_pipeline_state(struct brw_context *brw,
                          enum brw_pipeline pipeline)
{
   struct gl_context *ctx = &brw->ctx;
   int i;
   static int dirty_count = 0;
   struct brw_state_flags state = brw->state.pipelines[pipeline];
   unsigned int fb_samples = _mesa_geometric_samples(ctx->DrawBuffer);

   brw_select_pipeline(brw, pipeline);

   if (0) {
      /* Always re-emit all state. */
      brw->NewGLState = ~0;
      ctx->NewDriverState = ~0ull;
   }

   if (pipeline == BRW_RENDER_PIPELINE) {
      if (brw->fragment_program != ctx->FragmentProgram._Current) {
         brw->fragment_program = ctx->FragmentProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_FRAGMENT_PROGRAM;
      }

      if (brw->tess_eval_program != ctx->TessEvalProgram._Current) {
         brw->tess_eval_program = ctx->TessEvalProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_TESS_PROGRAMS;
      }

      if (brw->tess_ctrl_program != ctx->TessCtrlProgram._Current) {
         brw->tess_ctrl_program = ctx->TessCtrlProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_TESS_PROGRAMS;
      }

      if (brw->geometry_program != ctx->GeometryProgram._Current) {
         brw->geometry_program = ctx->GeometryProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_GEOMETRY_PROGRAM;
      }

      if (brw->vertex_program != ctx->VertexProgram._Current) {
         brw->vertex_program = ctx->VertexProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_VERTEX_PROGRAM;
      }
   }

   if (brw->compute_program != ctx->ComputeProgram._Current) {
      brw->compute_program = ctx->ComputeProgram._Current;
      brw->ctx.NewDriverState |= BRW_NEW_COMPUTE_PROGRAM;
   }

   if (brw->meta_in_progress != _mesa_meta_in_progress(ctx)) {
      brw->meta_in_progress = _mesa_meta_in_progress(ctx);
      brw->ctx.NewDriverState |= BRW_NEW_META_IN_PROGRESS;
   }

   if (brw->num_samples != fb_samples) {
      brw->num_samples = fb_samples;
      brw->ctx.NewDriverState |= BRW_NEW_NUM_SAMPLES;
   }

   /* Exit early if no state is flagged as dirty */
   merge_ctx_state(brw, &state);
   if ((state.mesa | state.brw) == 0)
      return;

   /* Emit Sandybridge workaround flushes on every primitive, for safety. */
   if (brw->gen == 6)
      brw_emit_post_sync_nonzero_flush(brw);

   brw_upload_programs(brw, pipeline);
   merge_ctx_state(brw, &state);

   brw_upload_state_base_address(brw);

   const struct brw_tracked_state *atoms =
      brw_get_pipeline_atoms(brw, pipeline);
   const int num_atoms = brw->num_atoms[pipeline];

   if (unlikely(INTEL_DEBUG)) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = state;

      for (i = 0; i < num_atoms; i++) {
         const struct brw_tracked_state *atom = &atoms[i];
         struct brw_state_flags generated;

         check_and_emit_atom(brw, &state, atom);

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ state)
          * if (examined & generated)
          *    fail;
          */
         xor_states(&generated, &prev, &state);
         assert(!check_state(&examined, &generated));
         prev = state;
      }
   } else {
      for (i = 0; i < num_atoms; i++) {
         const struct brw_tracked_state *atom = &atoms[i];

         check_and_emit_atom(brw, &state, atom);
      }
   }

   if (unlikely(INTEL_DEBUG & DEBUG_STATE)) {
      STATIC_ASSERT(ARRAY_SIZE(brw_bits) == BRW_NUM_STATE_BITS + 1);

      brw_update_dirty_count(mesa_bits, state.mesa);
      brw_update_dirty_count(brw_bits, state.brw);
      if (dirty_count++ % 1000 == 0) {
         brw_print_dirty_count(mesa_bits);
         brw_print_dirty_count(brw_bits);
         fprintf(stderr, "\n");
      }
   }
}

/***********************************************************************
 * Emit all state:
 */
void brw_upload_render_state(struct brw_context *brw)
{
   brw_upload_pipeline_state(brw, BRW_RENDER_PIPELINE);
}

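/* Clear this pipeline's dirty flags now that its state is committed, and
 * carry them over to the other pipelines, which have not yet seen those
 * updates.
 */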
static inline void
brw_pipeline_state_finished(struct brw_context *brw,
                            enum brw_pipeline pipeline)
{
   /* Save all dirty state into the other pipelines */
   for (unsigned i = 0; i < BRW_NUM_PIPELINES; i++) {
      if (i != pipeline) {
         brw->state.pipelines[i].mesa |= brw->NewGLState;
         brw->state.pipelines[i].brw |= brw->ctx.NewDriverState;
      } else {
         memset(&brw->state.pipelines[i], 0, sizeof(struct brw_state_flags));
      }
   }

   brw->NewGLState = 0;
   brw->ctx.NewDriverState = 0ull;
}

/**
 * Clear dirty bits to account for the fact that the state emitted by
 * brw_upload_render_state() has been committed to the hardware.  This is a
 * separate call from brw_upload_render_state() because it's possible that
 * after the call to brw_upload_render_state(), we will discover that we've
 * run out of aperture space, and need to rewind the batch buffer to the
 * state it had before the brw_upload_render_state() call.
 */
void
brw_render_state_finished(struct brw_context *brw)
{
   brw_pipeline_state_finished(brw, BRW_RENDER_PIPELINE);
}

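/* Compute counterpart of brw_upload_render_state(). */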
void
brw_upload_compute_state(struct brw_context *brw)
{
   brw_upload_pipeline_state(brw, BRW_COMPUTE_PIPELINE);
}

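/* Compute counterpart of brw_render_state_finished(); see the comment there
 * for why this is separate from brw_upload_compute_state().
 */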
void
brw_compute_state_finished(struct brw_context *brw)
{
   brw_pipeline_state_finished(brw, BRW_COMPUTE_PIPELINE);
}