i965/cnl: Handle gen10 in switch cases across the driver
src/mesa/drivers/dri/i965/brw_state_upload.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_context.h"
#include "brw_defines.h"
#include "brw_state.h"
#include "brw_program.h"
#include "drivers/common/meta.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "brw_vs.h"
#include "brw_ff_gs.h"
#include "brw_gs.h"
#include "brw_wm.h"
#include "brw_cs.h"
#include "main/framebuffer.h"

static void
brw_upload_initial_gpu_state(struct brw_context *brw)
{
   /* On platforms with hardware contexts, we can set our initial GPU state
    * right away rather than doing it via state atoms.  This saves a small
    * amount of overhead on every draw call.
    */
   if (!brw->hw_ctx)
      return;

   if (brw->gen == 6)
      brw_emit_post_sync_nonzero_flush(brw);

   brw_upload_invariant_state(brw);

   /* Recommended optimization for Victim Cache eviction in pixel backend. */
   if (brw->gen >= 9) {
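      /* CACHE_MODE_1 is a masked register: the high 16 bits are a write
       * mask selecting which of the low 16 bits actually get written,
       * which is why each enable bit below is paired with its REG_MASK().
       */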
      BEGIN_BATCH(3);
      OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
      OUT_BATCH(GEN7_CACHE_MODE_1);
      OUT_BATCH(REG_MASK(GEN9_FLOAT_BLEND_OPTIMIZATION_ENABLE) |
                REG_MASK(GEN9_PARTIAL_RESOLVE_DISABLE_IN_VC) |
                GEN9_FLOAT_BLEND_OPTIMIZATION_ENABLE |
                GEN9_PARTIAL_RESOLVE_DISABLE_IN_VC);
      ADVANCE_BATCH();
   }

   if (brw->gen >= 8) {
      gen8_emit_3dstate_sample_pattern(brw);

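      /* These packets don't appear to be covered by any state atom, so
       * emit them once with zeroed payloads to leave the hardware in a
       * known default state.
       */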
      BEGIN_BATCH(5);
      OUT_BATCH(_3DSTATE_WM_HZ_OP << 16 | (5 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();

      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_WM_CHROMAKEY << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }
}

static inline const struct brw_tracked_state *
brw_get_pipeline_atoms(struct brw_context *brw,
                       enum brw_pipeline pipeline)
{
   switch (pipeline) {
   case BRW_RENDER_PIPELINE:
      return brw->render_atoms;
   case BRW_COMPUTE_PIPELINE:
      return brw->compute_atoms;
   default:
      STATIC_ASSERT(BRW_NUM_PIPELINES == 2);
      unreachable("Unsupported pipeline");
      return NULL;
   }
}

void
brw_copy_pipeline_atoms(struct brw_context *brw,
                        enum brw_pipeline pipeline,
                        const struct brw_tracked_state **atoms,
                        int num_atoms)
{
   /* This is to work around brw_context::atoms being declared const.  We want
    * it to be const, but it needs to be initialized somehow!
    */
   struct brw_tracked_state *context_atoms =
      (struct brw_tracked_state *) brw_get_pipeline_atoms(brw, pipeline);

   for (int i = 0; i < num_atoms; i++) {
      context_atoms[i] = *atoms[i];
      assert(context_atoms[i].dirty.mesa | context_atoms[i].dirty.brw);
      assert(context_atoms[i].emit);
   }

   brw->num_atoms[pipeline] = num_atoms;
}

void brw_init_state( struct brw_context *brw )
{
   struct gl_context *ctx = &brw->ctx;

   /* Force the first brw_select_pipeline to emit pipeline select */
   brw->last_pipeline = BRW_NUM_PIPELINES;

   brw_init_caches(brw);

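   /* Pick the per-generation state atom lists.  Order matters: is_haswell
    * must be checked before gen >= 7 and is_g4x before the gen4 fallback,
    * since those platform flags refine their base generation.
    */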
   if (brw->gen >= 10)
      gen10_init_atoms(brw);
   else if (brw->gen >= 9)
      gen9_init_atoms(brw);
   else if (brw->gen >= 8)
      gen8_init_atoms(brw);
   else if (brw->is_haswell)
      gen75_init_atoms(brw);
   else if (brw->gen >= 7)
      gen7_init_atoms(brw);
   else if (brw->gen >= 6)
      gen6_init_atoms(brw);
   else if (brw->gen >= 5)
      gen5_init_atoms(brw);
   else if (brw->is_g4x)
      gen45_init_atoms(brw);
   else
      gen4_init_atoms(brw);

   brw_upload_initial_gpu_state(brw);

   brw->NewGLState = ~0;
   brw->ctx.NewDriverState = ~0ull;

   /* ~0 is a nonsensical value which won't match anything we program, so
    * the programming will take effect the first time around.
    */
   brw->pma_stall_bits = ~0;

   /* Make sure that brw->ctx.NewDriverState has enough bits to hold all
    * possible dirty flags.
    */
   STATIC_ASSERT(BRW_NUM_STATE_BITS <= 8 * sizeof(brw->ctx.NewDriverState));

   ctx->DriverFlags.NewTransformFeedback = BRW_NEW_TRANSFORM_FEEDBACK;
   ctx->DriverFlags.NewTransformFeedbackProg = BRW_NEW_TRANSFORM_FEEDBACK;
   ctx->DriverFlags.NewRasterizerDiscard = BRW_NEW_RASTERIZER_DISCARD;
   ctx->DriverFlags.NewUniformBuffer = BRW_NEW_UNIFORM_BUFFER;
   ctx->DriverFlags.NewShaderStorageBuffer = BRW_NEW_UNIFORM_BUFFER;
   ctx->DriverFlags.NewTextureBuffer = BRW_NEW_TEXTURE_BUFFER;
   ctx->DriverFlags.NewAtomicBuffer = BRW_NEW_ATOMIC_BUFFER;
   ctx->DriverFlags.NewImageUnits = BRW_NEW_IMAGE_UNITS;
   ctx->DriverFlags.NewDefaultTessLevels = BRW_NEW_DEFAULT_TESS_LEVELS;
   ctx->DriverFlags.NewIntelConservativeRasterization =
      BRW_NEW_CONSERVATIVE_RASTERIZATION;
}


void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
}

/***********************************************************************
 */

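/* Return true if any dirty bit in @b is also set in @a. */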
static bool
check_state(const struct brw_state_flags *a, const struct brw_state_flags *b)
{
   return ((a->mesa & b->mesa) | (a->brw & b->brw)) != 0;
}

static void accumulate_state( struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   a->mesa |= b->mesa;
   a->brw |= b->brw;
}


static void xor_states( struct brw_state_flags *result,
                        const struct brw_state_flags *a,
                        const struct brw_state_flags *b )
{
   result->mesa = a->mesa ^ b->mesa;
   result->brw = a->brw ^ b->brw;
}

struct dirty_bit_map {
   uint64_t bit;
   char *name;
   uint32_t count;
};

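/* The tables below map each dirty bit to its name so that, under
 * INTEL_DEBUG=state, brw_update_dirty_count() and brw_print_dirty_count()
 * can report how often each bit triggers state re-emission.
 */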
#define DEFINE_BIT(name) {name, #name, 0}

static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE_OBJECT),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_TEXTURE_STATE),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_CURRENT_ATTRIB),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   DEFINE_BIT(_NEW_BUFFER_OBJECT),
   DEFINE_BIT(_NEW_FRAG_CLAMP),
   /* Avoid sign extension problems. */
   {(unsigned) _NEW_VARYING_VP_INPUTS, "_NEW_VARYING_VP_INPUTS", 0},
   {0, 0, 0}
};

static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_FS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_BLORP_BLIT_PROG_DATA),
   DEFINE_BIT(BRW_NEW_SF_PROG_DATA),
   DEFINE_BIT(BRW_NEW_VS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_FF_GS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_GS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_TCS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_TES_PROG_DATA),
   DEFINE_BIT(BRW_NEW_CLIP_PROG_DATA),
   DEFINE_BIT(BRW_NEW_CS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_GEOMETRY_PROGRAM),
   DEFINE_BIT(BRW_NEW_TESS_PROGRAMS),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PATCH_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_SURFACES),
   DEFINE_BIT(BRW_NEW_BINDING_TABLE_POINTERS),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_DEFAULT_TESS_LEVELS),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_TCS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_TES_CONSTBUF),
   DEFINE_BIT(BRW_NEW_GS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_PROGRAM_CACHE),
   DEFINE_BIT(BRW_NEW_STATE_BASE_ADDRESS),
   DEFINE_BIT(BRW_NEW_VUE_MAP_GEOM_OUT),
   DEFINE_BIT(BRW_NEW_TRANSFORM_FEEDBACK),
   DEFINE_BIT(BRW_NEW_RASTERIZER_DISCARD),
   DEFINE_BIT(BRW_NEW_STATS_WM),
   DEFINE_BIT(BRW_NEW_UNIFORM_BUFFER),
   DEFINE_BIT(BRW_NEW_ATOMIC_BUFFER),
   DEFINE_BIT(BRW_NEW_IMAGE_UNITS),
   DEFINE_BIT(BRW_NEW_META_IN_PROGRESS),
   DEFINE_BIT(BRW_NEW_PUSH_CONSTANT_ALLOCATION),
   DEFINE_BIT(BRW_NEW_NUM_SAMPLES),
   DEFINE_BIT(BRW_NEW_TEXTURE_BUFFER),
   DEFINE_BIT(BRW_NEW_GEN4_UNIT_STATE),
   DEFINE_BIT(BRW_NEW_CC_VP),
   DEFINE_BIT(BRW_NEW_SF_VP),
   DEFINE_BIT(BRW_NEW_CLIP_VP),
   DEFINE_BIT(BRW_NEW_SAMPLER_STATE_TABLE),
   DEFINE_BIT(BRW_NEW_VS_ATTRIB_WORKAROUNDS),
   DEFINE_BIT(BRW_NEW_COMPUTE_PROGRAM),
   DEFINE_BIT(BRW_NEW_CS_WORK_GROUPS),
   DEFINE_BIT(BRW_NEW_URB_SIZE),
   DEFINE_BIT(BRW_NEW_CC_STATE),
   DEFINE_BIT(BRW_NEW_BLORP),
   DEFINE_BIT(BRW_NEW_VIEWPORT_COUNT),
   DEFINE_BIT(BRW_NEW_CONSERVATIVE_RASTERIZATION),
   {0, 0, 0}
};

static void
brw_update_dirty_count(struct dirty_bit_map *bit_map, uint64_t bits)
{
   for (int i = 0; bit_map[i].bit != 0; i++) {
      if (bit_map[i].bit & bits)
         bit_map[i].count++;
   }
}

static void
brw_print_dirty_count(struct dirty_bit_map *bit_map)
{
   for (int i = 0; bit_map[i].bit != 0; i++) {
      if (bit_map[i].count > 1) {
         fprintf(stderr, "0x%016"PRIx64": %12d (%s)\n",
                 bit_map[i].bit, bit_map[i].count, bit_map[i].name);
      }
   }
}

static inline void
brw_upload_tess_programs(struct brw_context *brw)
{
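   /* A bound TES is what enables tessellation; when one is present,
    * brw_upload_tcs_prog() supplies a passthrough TCS if the application
    * did not provide one.
    */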
   if (brw->tess_eval_program) {
      brw_upload_tcs_prog(brw);
      brw_upload_tes_prog(brw);
   } else {
      brw->tcs.base.prog_data = NULL;
      brw->tes.base.prog_data = NULL;
   }
}

static inline void
brw_upload_programs(struct brw_context *brw,
                    enum brw_pipeline pipeline)
{
   struct gl_context *ctx = &brw->ctx;

   if (pipeline == BRW_RENDER_PIPELINE) {
      brw_upload_vs_prog(brw);
      brw_upload_tess_programs(brw);

      if (brw->gen < 6)
         brw_upload_ff_gs_prog(brw);
      else
         brw_upload_gs_prog(brw);

      /* Update the VUE map for data exiting the GS stage of the pipeline.
       * This comes from the last enabled shader stage.
       */
      GLbitfield64 old_slots = brw->vue_map_geom_out.slots_valid;
      bool old_separate = brw->vue_map_geom_out.separate;
      struct brw_vue_prog_data *vue_prog_data;
      if (brw->geometry_program)
         vue_prog_data = brw_vue_prog_data(brw->gs.base.prog_data);
      else if (brw->tess_eval_program)
         vue_prog_data = brw_vue_prog_data(brw->tes.base.prog_data);
      else
         vue_prog_data = brw_vue_prog_data(brw->vs.base.prog_data);

      brw->vue_map_geom_out = vue_prog_data->vue_map;

      /* If the layout has changed, signal BRW_NEW_VUE_MAP_GEOM_OUT. */
      if (old_slots != brw->vue_map_geom_out.slots_valid ||
          old_separate != brw->vue_map_geom_out.separate)
         brw->ctx.NewDriverState |= BRW_NEW_VUE_MAP_GEOM_OUT;

      if ((old_slots ^ brw->vue_map_geom_out.slots_valid) &
          VARYING_BIT_VIEWPORT) {
         ctx->NewDriverState |= BRW_NEW_VIEWPORT_COUNT;
         brw->clip.viewport_count =
            (brw->vue_map_geom_out.slots_valid & VARYING_BIT_VIEWPORT) ?
            ctx->Const.MaxViewports : 1;
      }

      brw_upload_wm_prog(brw);

      if (brw->gen < 6) {
         brw_upload_clip_prog(brw);
         brw_upload_sf_prog(brw);
      }
   } else if (pipeline == BRW_COMPUTE_PIPELINE) {
      brw_upload_cs_prog(brw);
   }
}

static inline void
merge_ctx_state(struct brw_context *brw,
                struct brw_state_flags *state)
{
   state->mesa |= brw->NewGLState;
   state->brw |= brw->ctx.NewDriverState;
}

static inline void
check_and_emit_atom(struct brw_context *brw,
                    struct brw_state_flags *state,
                    const struct brw_tracked_state *atom)
{
   if (check_state(state, &atom->dirty)) {
      atom->emit(brw);
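      /* Emitting an atom may flag more state dirty; merge it in so atoms
       * later in the list see the new flags.
       */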
      merge_ctx_state(brw, state);
   }
}

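/* Upload all dirty state for @pipeline: note per-stage program changes,
 * (re)compile and upload the programs, then walk the pipeline's atom list
 * emitting every atom whose dirty bits intersect the accumulated flags.
 */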
static inline void
brw_upload_pipeline_state(struct brw_context *brw,
                          enum brw_pipeline pipeline)
{
   struct gl_context *ctx = &brw->ctx;
   int i;
   static int dirty_count = 0;
   struct brw_state_flags state = brw->state.pipelines[pipeline];
   unsigned int fb_samples = _mesa_geometric_samples(ctx->DrawBuffer);

   brw_select_pipeline(brw, pipeline);

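   /* Flip this 0 to 1 when debugging to force every atom to re-emit on
    * each draw.
    */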
   if (0) {
      /* Always re-emit all state. */
      brw->NewGLState = ~0;
      ctx->NewDriverState = ~0ull;
   }

   if (pipeline == BRW_RENDER_PIPELINE) {
      if (brw->fragment_program != ctx->FragmentProgram._Current) {
         brw->fragment_program = ctx->FragmentProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_FRAGMENT_PROGRAM;
      }

      if (brw->tess_eval_program != ctx->TessEvalProgram._Current) {
         brw->tess_eval_program = ctx->TessEvalProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_TESS_PROGRAMS;
      }

      if (brw->tess_ctrl_program != ctx->TessCtrlProgram._Current) {
         brw->tess_ctrl_program = ctx->TessCtrlProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_TESS_PROGRAMS;
      }

      if (brw->geometry_program != ctx->GeometryProgram._Current) {
         brw->geometry_program = ctx->GeometryProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_GEOMETRY_PROGRAM;
      }

      if (brw->vertex_program != ctx->VertexProgram._Current) {
         brw->vertex_program = ctx->VertexProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_VERTEX_PROGRAM;
      }
   }

   if (brw->compute_program != ctx->ComputeProgram._Current) {
      brw->compute_program = ctx->ComputeProgram._Current;
      brw->ctx.NewDriverState |= BRW_NEW_COMPUTE_PROGRAM;
   }

   if (brw->meta_in_progress != _mesa_meta_in_progress(ctx)) {
      brw->meta_in_progress = _mesa_meta_in_progress(ctx);
      brw->ctx.NewDriverState |= BRW_NEW_META_IN_PROGRESS;
   }

   if (brw->num_samples != fb_samples) {
      brw->num_samples = fb_samples;
      brw->ctx.NewDriverState |= BRW_NEW_NUM_SAMPLES;
   }

   /* Exit early if no state is flagged as dirty */
   merge_ctx_state(brw, &state);
   if ((state.mesa | state.brw) == 0)
      return;

   /* Emit Sandybridge workaround flushes on every primitive, for safety. */
   if (brw->gen == 6)
      brw_emit_post_sync_nonzero_flush(brw);

   brw_upload_programs(brw, pipeline);
   merge_ctx_state(brw, &state);

   brw_upload_state_base_address(brw);

   const struct brw_tracked_state *atoms =
      brw_get_pipeline_atoms(brw, pipeline);
   const int num_atoms = brw->num_atoms[pipeline];

   if (unlikely(INTEL_DEBUG)) {
      /* Debug version which enforces various sanity checks on the state
       * flags which are generated and checked, to help ensure state atoms
       * are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = state;

      for (i = 0; i < num_atoms; i++) {
         const struct brw_tracked_state *atom = &atoms[i];
         struct brw_state_flags generated;

         check_and_emit_atom(brw, &state, atom);

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ state)
          * if (examined & generated)
          *    fail;
          */
         xor_states(&generated, &prev, &state);
         assert(!check_state(&examined, &generated));
         prev = state;
      }
   }
   else {
      for (i = 0; i < num_atoms; i++) {
         const struct brw_tracked_state *atom = &atoms[i];

         check_and_emit_atom(brw, &state, atom);
      }
   }

   if (unlikely(INTEL_DEBUG & DEBUG_STATE)) {
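      /* The + 1 accounts for the terminating {0, 0, 0} sentinel entry. */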
      STATIC_ASSERT(ARRAY_SIZE(brw_bits) == BRW_NUM_STATE_BITS + 1);

      brw_update_dirty_count(mesa_bits, state.mesa);
      brw_update_dirty_count(brw_bits, state.brw);
      if (dirty_count++ % 1000 == 0) {
         brw_print_dirty_count(mesa_bits);
         brw_print_dirty_count(brw_bits);
         fprintf(stderr, "\n");
      }
   }
}

/***********************************************************************
 * Emit all state:
 */
void brw_upload_render_state(struct brw_context *brw)
{
   brw_upload_pipeline_state(brw, BRW_RENDER_PIPELINE);
}

static inline void
brw_pipeline_state_finished(struct brw_context *brw,
                            enum brw_pipeline pipeline)
{
   /* Save all dirty state into the other pipelines */
   for (unsigned i = 0; i < BRW_NUM_PIPELINES; i++) {
      if (i != pipeline) {
         brw->state.pipelines[i].mesa |= brw->NewGLState;
         brw->state.pipelines[i].brw |= brw->ctx.NewDriverState;
      } else {
         memset(&brw->state.pipelines[i], 0, sizeof(struct brw_state_flags));
      }
   }

   brw->NewGLState = 0;
   brw->ctx.NewDriverState = 0ull;
}

/**
 * Clear dirty bits to account for the fact that the state emitted by
 * brw_upload_render_state() has been committed to the hardware.  This is a
 * separate call from brw_upload_render_state() because it's possible that
 * after the call to brw_upload_render_state(), we will discover that we've
 * run out of aperture space, and need to rewind the batch buffer to the
 * state it had before the brw_upload_render_state() call.
 */
void
brw_render_state_finished(struct brw_context *brw)
{
   brw_pipeline_state_finished(brw, BRW_RENDER_PIPELINE);
}

void
brw_upload_compute_state(struct brw_context *brw)
{
   brw_upload_pipeline_state(brw, BRW_COMPUTE_PIPELINE);
}

void
brw_compute_state_finished(struct brw_context *brw)
{
   brw_pipeline_state_finished(brw, BRW_COMPUTE_PIPELINE);
}