src/mesa/drivers/dri/i965/brw_state_upload.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_context.h"
#include "brw_defines.h"
#include "brw_state.h"
#include "brw_program.h"
#include "drivers/common/meta.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "brw_vs.h"
#include "brw_ff_gs.h"
#include "brw_gs.h"
#include "brw_wm.h"
#include "brw_cs.h"
#include "main/framebuffer.h"

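/* Program GPU state that is set once at context creation and then left
 * alone, rather than being re-emitted through per-draw state atoms.
 */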
static void
brw_upload_initial_gpu_state(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* On platforms with hardware contexts, we can set our initial GPU state
    * right away rather than doing it via state atoms.  This saves a small
    * amount of overhead on every draw call.
    */
   if (!brw->hw_ctx)
      return;

   if (devinfo->gen == 6)
      brw_emit_post_sync_nonzero_flush(brw);

   brw_upload_invariant_state(brw);

   if (devinfo->gen == 9) {
      /* Recommended optimizations for Victim Cache eviction and floating
       * point blending.
       */
      BEGIN_BATCH(3);
      OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
      OUT_BATCH(GEN7_CACHE_MODE_1);
      OUT_BATCH(REG_MASK(GEN9_FLOAT_BLEND_OPTIMIZATION_ENABLE) |
                REG_MASK(GEN9_PARTIAL_RESOLVE_DISABLE_IN_VC) |
                GEN9_FLOAT_BLEND_OPTIMIZATION_ENABLE |
                GEN9_PARTIAL_RESOLVE_DISABLE_IN_VC);
      ADVANCE_BATCH();

      if (gen_device_info_is_9lp(devinfo)) {
         BEGIN_BATCH(3);
         OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
         OUT_BATCH(GEN7_GT_MODE);
         OUT_BATCH(GEN9_SUBSLICE_HASHING_MASK_BITS |
                   GEN9_SUBSLICE_HASHING_16x16);
         ADVANCE_BATCH();
      }
   }

   if (devinfo->gen >= 8) {
      gen8_emit_3dstate_sample_pattern(brw);

      BEGIN_BATCH(5);
      OUT_BATCH(_3DSTATE_WM_HZ_OP << 16 | (5 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();

      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_WM_CHROMAKEY << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* Set the "CONSTANT_BUFFER Address Offset Disable" bit, so
    * 3DSTATE_CONSTANT_XS buffer 0 is an absolute address.
    *
    * On Gen6-7.5, we use an execbuf parameter to do this for us.
    * However, the kernel ignores that when execlists are in use.
    * Fortunately, we can just write the registers from userspace
    * on Gen8+, and they're context saved/restored.
    */
   if (devinfo->gen >= 9) {
      BEGIN_BATCH(3);
      OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
      OUT_BATCH(CS_DEBUG_MODE2);
      OUT_BATCH(REG_MASK(CSDBG2_CONSTANT_BUFFER_ADDRESS_OFFSET_DISABLE) |
                CSDBG2_CONSTANT_BUFFER_ADDRESS_OFFSET_DISABLE);
      ADVANCE_BATCH();
   } else if (devinfo->gen == 8) {
      BEGIN_BATCH(3);
      OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
      OUT_BATCH(INSTPM);
      OUT_BATCH(REG_MASK(INSTPM_CONSTANT_BUFFER_ADDRESS_OFFSET_DISABLE) |
                INSTPM_CONSTANT_BUFFER_ADDRESS_OFFSET_DISABLE);
      ADVANCE_BATCH();
   }
}

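/* Return the state atom list for the given pipeline (render or compute). */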
static inline const struct brw_tracked_state *
brw_get_pipeline_atoms(struct brw_context *brw,
                       enum brw_pipeline pipeline)
{
   switch (pipeline) {
   case BRW_RENDER_PIPELINE:
      return brw->render_atoms;
   case BRW_COMPUTE_PIPELINE:
      return brw->compute_atoms;
   default:
      STATIC_ASSERT(BRW_NUM_PIPELINES == 2);
      unreachable("Unsupported pipeline");
      return NULL;
   }
}

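/* Install the per-generation atom list for a pipeline on the context.
 *
 * Each atom pairs a set of dirty flags with an emit callback.  Purely as an
 * illustration (this particular atom does not exist in the driver), an entry
 * in the list passed here might be declared as:
 *
 *    static const struct brw_tracked_state brw_example_atom = {
 *       .dirty = {
 *          .mesa = _NEW_COLOR,
 *          .brw  = BRW_NEW_CONTEXT | BRW_NEW_BATCH,
 *       },
 *       .emit = upload_example_state,
 *    };
 */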
void
brw_copy_pipeline_atoms(struct brw_context *brw,
                        enum brw_pipeline pipeline,
                        const struct brw_tracked_state **atoms,
                        int num_atoms)
{
   /* This is to work around brw_context::atoms being declared const.  We want
    * it to be const, but it needs to be initialized somehow!
    */
   struct brw_tracked_state *context_atoms =
      (struct brw_tracked_state *) brw_get_pipeline_atoms(brw, pipeline);

   for (int i = 0; i < num_atoms; i++) {
      context_atoms[i] = *atoms[i];
      assert(context_atoms[i].dirty.mesa | context_atoms[i].dirty.brw);
      assert(context_atoms[i].emit);
   }

   brw->num_atoms[pipeline] = num_atoms;
}

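/* One-time state setup: build the program caches and per-generation atom
 * lists, emit the initial GPU state, and tell core Mesa which driver dirty
 * bits correspond to which GL state changes.
 */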
void brw_init_state( struct brw_context *brw )
{
   struct gl_context *ctx = &brw->ctx;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* Force the first brw_select_pipeline to emit pipeline select */
   brw->last_pipeline = BRW_NUM_PIPELINES;

   brw_init_caches(brw);

   if (devinfo->gen >= 10)
      gen10_init_atoms(brw);
   else if (devinfo->gen >= 9)
      gen9_init_atoms(brw);
   else if (devinfo->gen >= 8)
      gen8_init_atoms(brw);
   else if (devinfo->is_haswell)
      gen75_init_atoms(brw);
   else if (devinfo->gen >= 7)
      gen7_init_atoms(brw);
   else if (devinfo->gen >= 6)
      gen6_init_atoms(brw);
   else if (devinfo->gen >= 5)
      gen5_init_atoms(brw);
   else if (devinfo->is_g4x)
      gen45_init_atoms(brw);
   else
      gen4_init_atoms(brw);

   brw_upload_initial_gpu_state(brw);

   brw->NewGLState = ~0;
   brw->ctx.NewDriverState = ~0ull;

   /* ~0 is a nonsensical value which won't match anything we program, so
    * the programming will take effect on the first time around.
    */
   brw->pma_stall_bits = ~0;

   /* Make sure that brw->ctx.NewDriverState has enough bits to hold all
    * possible dirty flags.
    */
   STATIC_ASSERT(BRW_NUM_STATE_BITS <= 8 * sizeof(brw->ctx.NewDriverState));

   ctx->DriverFlags.NewTransformFeedback = BRW_NEW_TRANSFORM_FEEDBACK;
   ctx->DriverFlags.NewTransformFeedbackProg = BRW_NEW_TRANSFORM_FEEDBACK;
   ctx->DriverFlags.NewRasterizerDiscard = BRW_NEW_RASTERIZER_DISCARD;
   ctx->DriverFlags.NewUniformBuffer = BRW_NEW_UNIFORM_BUFFER;
   ctx->DriverFlags.NewShaderStorageBuffer = BRW_NEW_UNIFORM_BUFFER;
   ctx->DriverFlags.NewTextureBuffer = BRW_NEW_TEXTURE_BUFFER;
   ctx->DriverFlags.NewAtomicBuffer = BRW_NEW_ATOMIC_BUFFER;
   ctx->DriverFlags.NewImageUnits = BRW_NEW_IMAGE_UNITS;
   ctx->DriverFlags.NewDefaultTessLevels = BRW_NEW_DEFAULT_TESS_LEVELS;
   ctx->DriverFlags.NewIntelConservativeRasterization = BRW_NEW_CONSERVATIVE_RASTERIZATION;
}


void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
}

/***********************************************************************
 */

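/* Return true if any dirty flag set in 'a' is also set in 'b'. */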
static bool
check_state(const struct brw_state_flags *a, const struct brw_state_flags *b)
{
   return ((a->mesa & b->mesa) | (a->brw & b->brw)) != 0;
}

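/* OR the dirty flags from 'b' into 'a'. */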
static void accumulate_state( struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   a->mesa |= b->mesa;
   a->brw |= b->brw;
}


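/* Compute the dirty flags that differ between 'a' and 'b'. */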
static void xor_states( struct brw_state_flags *result,
                        const struct brw_state_flags *a,
                        const struct brw_state_flags *b )
{
   result->mesa = a->mesa ^ b->mesa;
   result->brw = a->brw ^ b->brw;
}

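/* Debugging aid used under the DEBUG_STATE flag: each entry maps a dirty bit
 * to its name and a running count of how often it has been flagged, which
 * brw_print_dirty_count() dumps periodically.
 */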
struct dirty_bit_map {
   uint64_t bit;
   char *name;
   uint32_t count;
};

#define DEFINE_BIT(name) {name, #name, 0}

static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE_OBJECT),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_TEXTURE_STATE),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_CURRENT_ATTRIB),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   DEFINE_BIT(_NEW_FRAG_CLAMP),
   /* Avoid sign extension problems. */
   {(unsigned) _NEW_VARYING_VP_INPUTS, "_NEW_VARYING_VP_INPUTS", 0},
   {0, 0, 0}
};

static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_FS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_BLORP_BLIT_PROG_DATA),
   DEFINE_BIT(BRW_NEW_SF_PROG_DATA),
   DEFINE_BIT(BRW_NEW_VS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_FF_GS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_GS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_TCS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_TES_PROG_DATA),
   DEFINE_BIT(BRW_NEW_CLIP_PROG_DATA),
   DEFINE_BIT(BRW_NEW_CS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_GEOMETRY_PROGRAM),
   DEFINE_BIT(BRW_NEW_TESS_PROGRAMS),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PATCH_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_SURFACES),
   DEFINE_BIT(BRW_NEW_BINDING_TABLE_POINTERS),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_DEFAULT_TESS_LEVELS),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_TCS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_TES_CONSTBUF),
   DEFINE_BIT(BRW_NEW_GS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_PROGRAM_CACHE),
   DEFINE_BIT(BRW_NEW_STATE_BASE_ADDRESS),
   DEFINE_BIT(BRW_NEW_VUE_MAP_GEOM_OUT),
   DEFINE_BIT(BRW_NEW_TRANSFORM_FEEDBACK),
   DEFINE_BIT(BRW_NEW_RASTERIZER_DISCARD),
   DEFINE_BIT(BRW_NEW_STATS_WM),
   DEFINE_BIT(BRW_NEW_UNIFORM_BUFFER),
   DEFINE_BIT(BRW_NEW_ATOMIC_BUFFER),
   DEFINE_BIT(BRW_NEW_IMAGE_UNITS),
   DEFINE_BIT(BRW_NEW_META_IN_PROGRESS),
   DEFINE_BIT(BRW_NEW_PUSH_CONSTANT_ALLOCATION),
   DEFINE_BIT(BRW_NEW_NUM_SAMPLES),
   DEFINE_BIT(BRW_NEW_TEXTURE_BUFFER),
   DEFINE_BIT(BRW_NEW_GEN4_UNIT_STATE),
   DEFINE_BIT(BRW_NEW_CC_VP),
   DEFINE_BIT(BRW_NEW_SF_VP),
   DEFINE_BIT(BRW_NEW_CLIP_VP),
   DEFINE_BIT(BRW_NEW_SAMPLER_STATE_TABLE),
   DEFINE_BIT(BRW_NEW_VS_ATTRIB_WORKAROUNDS),
   DEFINE_BIT(BRW_NEW_COMPUTE_PROGRAM),
   DEFINE_BIT(BRW_NEW_CS_WORK_GROUPS),
   DEFINE_BIT(BRW_NEW_URB_SIZE),
   DEFINE_BIT(BRW_NEW_CC_STATE),
   DEFINE_BIT(BRW_NEW_BLORP),
   DEFINE_BIT(BRW_NEW_VIEWPORT_COUNT),
   DEFINE_BIT(BRW_NEW_CONSERVATIVE_RASTERIZATION),
   DEFINE_BIT(BRW_NEW_DRAW_CALL),
   DEFINE_BIT(BRW_NEW_AUX_STATE),
   {0, 0, 0}
};

static void
brw_update_dirty_count(struct dirty_bit_map *bit_map, uint64_t bits)
{
   for (int i = 0; bit_map[i].bit != 0; i++) {
      if (bit_map[i].bit & bits)
         bit_map[i].count++;
   }
}

static void
brw_print_dirty_count(struct dirty_bit_map *bit_map)
{
   for (int i = 0; bit_map[i].bit != 0; i++) {
      if (bit_map[i].count > 1) {
         fprintf(stderr, "0x%016"PRIx64": %12d (%s)\n",
                 bit_map[i].bit, bit_map[i].count, bit_map[i].name);
      }
   }
}

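/* Upload the tessellation programs, or clear their prog_data when no
 * tessellation evaluation shader is bound.
 */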
static inline void
brw_upload_tess_programs(struct brw_context *brw)
{
   if (brw->programs[MESA_SHADER_TESS_EVAL]) {
      brw_upload_tcs_prog(brw);
      brw_upload_tes_prog(brw);
   } else {
      brw->tcs.base.prog_data = NULL;
      brw->tes.base.prog_data = NULL;
   }
}

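/* Upload the compiled programs for the current pipeline.  For the render
 * pipeline, this also recomputes the VUE map for geometry leaving the
 * VS/TES/GS stages and flags the relevant dirty bits when it changes.
 */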
static inline void
brw_upload_programs(struct brw_context *brw,
                    enum brw_pipeline pipeline)
{
   struct gl_context *ctx = &brw->ctx;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (pipeline == BRW_RENDER_PIPELINE) {
      brw_upload_vs_prog(brw);
      brw_upload_tess_programs(brw);

      if (brw->programs[MESA_SHADER_GEOMETRY]) {
         brw_upload_gs_prog(brw);
      } else {
         brw->gs.base.prog_data = NULL;
         if (devinfo->gen < 7)
            brw_upload_ff_gs_prog(brw);
      }

      /* Update the VUE map for data exiting the GS stage of the pipeline.
       * This comes from the last enabled shader stage.
       */
      GLbitfield64 old_slots = brw->vue_map_geom_out.slots_valid;
      bool old_separate = brw->vue_map_geom_out.separate;
      struct brw_vue_prog_data *vue_prog_data;
      if (brw->programs[MESA_SHADER_GEOMETRY])
         vue_prog_data = brw_vue_prog_data(brw->gs.base.prog_data);
      else if (brw->programs[MESA_SHADER_TESS_EVAL])
         vue_prog_data = brw_vue_prog_data(brw->tes.base.prog_data);
      else
         vue_prog_data = brw_vue_prog_data(brw->vs.base.prog_data);

      brw->vue_map_geom_out = vue_prog_data->vue_map;

      /* If the layout has changed, signal BRW_NEW_VUE_MAP_GEOM_OUT. */
      if (old_slots != brw->vue_map_geom_out.slots_valid ||
          old_separate != brw->vue_map_geom_out.separate)
         brw->ctx.NewDriverState |= BRW_NEW_VUE_MAP_GEOM_OUT;

      if ((old_slots ^ brw->vue_map_geom_out.slots_valid) &
          VARYING_BIT_VIEWPORT) {
         ctx->NewDriverState |= BRW_NEW_VIEWPORT_COUNT;
         brw->clip.viewport_count =
            (brw->vue_map_geom_out.slots_valid & VARYING_BIT_VIEWPORT) ?
            ctx->Const.MaxViewports : 1;
      }

      brw_upload_wm_prog(brw);

      if (devinfo->gen < 6) {
         brw_upload_clip_prog(brw);
         brw_upload_sf_prog(brw);
      }
   } else if (pipeline == BRW_COMPUTE_PIPELINE) {
      brw_upload_cs_prog(brw);
   }
}

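/* Fold the context's pending GL and driver dirty bits into 'state'. */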
static inline void
merge_ctx_state(struct brw_context *brw,
                struct brw_state_flags *state)
{
   state->mesa |= brw->NewGLState;
   state->brw |= brw->ctx.NewDriverState;
}

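/* Emit a single atom if any of its dirty bits intersect the accumulated
 * dirty state, then pick up any new dirty flags its emit function raised.
 */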
static ALWAYS_INLINE void
check_and_emit_atom(struct brw_context *brw,
                    struct brw_state_flags *state,
                    const struct brw_tracked_state *atom)
{
   if (check_state(state, &atom->dirty)) {
      atom->emit(brw);
      merge_ctx_state(brw, state);
   }
}

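/* Main state upload path: select the pipeline, pick up any program changes
 * from core Mesa, compile/upload the programs, and then walk the pipeline's
 * atom list, emitting every atom whose dirty bits are set.
 */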
static inline void
brw_upload_pipeline_state(struct brw_context *brw,
                          enum brw_pipeline pipeline)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   int i;
   static int dirty_count = 0;
   struct brw_state_flags state = brw->state.pipelines[pipeline];
   const unsigned fb_samples =
      MAX2(_mesa_geometric_samples(ctx->DrawBuffer), 1);

   brw_select_pipeline(brw, pipeline);

   if (unlikely(INTEL_DEBUG & DEBUG_REEMIT)) {
      /* Always re-emit all state. */
      brw->NewGLState = ~0;
      ctx->NewDriverState = ~0ull;
   }

   if (pipeline == BRW_RENDER_PIPELINE) {
      if (brw->programs[MESA_SHADER_FRAGMENT] !=
          ctx->FragmentProgram._Current) {
         brw->programs[MESA_SHADER_FRAGMENT] = ctx->FragmentProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_FRAGMENT_PROGRAM;
      }

      if (brw->programs[MESA_SHADER_TESS_EVAL] !=
          ctx->TessEvalProgram._Current) {
         brw->programs[MESA_SHADER_TESS_EVAL] = ctx->TessEvalProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_TESS_PROGRAMS;
      }

      if (brw->programs[MESA_SHADER_TESS_CTRL] !=
          ctx->TessCtrlProgram._Current) {
         brw->programs[MESA_SHADER_TESS_CTRL] = ctx->TessCtrlProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_TESS_PROGRAMS;
      }

      if (brw->programs[MESA_SHADER_GEOMETRY] !=
          ctx->GeometryProgram._Current) {
         brw->programs[MESA_SHADER_GEOMETRY] = ctx->GeometryProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_GEOMETRY_PROGRAM;
      }

      if (brw->programs[MESA_SHADER_VERTEX] != ctx->VertexProgram._Current) {
         brw->programs[MESA_SHADER_VERTEX] = ctx->VertexProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_VERTEX_PROGRAM;
      }
   }

   if (brw->programs[MESA_SHADER_COMPUTE] != ctx->ComputeProgram._Current) {
      brw->programs[MESA_SHADER_COMPUTE] = ctx->ComputeProgram._Current;
      brw->ctx.NewDriverState |= BRW_NEW_COMPUTE_PROGRAM;
   }

   if (brw->meta_in_progress != _mesa_meta_in_progress(ctx)) {
      brw->meta_in_progress = _mesa_meta_in_progress(ctx);
      brw->ctx.NewDriverState |= BRW_NEW_META_IN_PROGRESS;
   }

   if (brw->num_samples != fb_samples) {
      brw->num_samples = fb_samples;
      brw->ctx.NewDriverState |= BRW_NEW_NUM_SAMPLES;
   }

   /* Exit early if no state is flagged as dirty */
   merge_ctx_state(brw, &state);
   if ((state.mesa | state.brw) == 0)
      return;

   /* Emit Sandybridge workaround flushes on every primitive, for safety. */
   if (devinfo->gen == 6)
      brw_emit_post_sync_nonzero_flush(brw);

   brw_upload_programs(brw, pipeline);
   merge_ctx_state(brw, &state);

   brw_upload_state_base_address(brw);

   const struct brw_tracked_state *atoms =
      brw_get_pipeline_atoms(brw, pipeline);
   const int num_atoms = brw->num_atoms[pipeline];

   if (unlikely(INTEL_DEBUG)) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = state;

      for (i = 0; i < num_atoms; i++) {
         const struct brw_tracked_state *atom = &atoms[i];
         struct brw_state_flags generated;

         check_and_emit_atom(brw, &state, atom);

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ state)
          * if (examined & generated)
          *     fail;
          */
         xor_states(&generated, &prev, &state);
         assert(!check_state(&examined, &generated));
         prev = state;
      }
   }
   else {
      for (i = 0; i < num_atoms; i++) {
         const struct brw_tracked_state *atom = &atoms[i];

         check_and_emit_atom(brw, &state, atom);
      }
   }

   if (unlikely(INTEL_DEBUG & DEBUG_STATE)) {
      STATIC_ASSERT(ARRAY_SIZE(brw_bits) == BRW_NUM_STATE_BITS + 1);

      brw_update_dirty_count(mesa_bits, state.mesa);
      brw_update_dirty_count(brw_bits, state.brw);
      if (dirty_count++ % 1000 == 0) {
         brw_print_dirty_count(mesa_bits);
         brw_print_dirty_count(brw_bits);
         fprintf(stderr, "\n");
      }
   }
}

/***********************************************************************
 * Emit all state:
 */
void brw_upload_render_state(struct brw_context *brw)
{
   brw_upload_pipeline_state(brw, BRW_RENDER_PIPELINE);
}

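/* Mark the given pipeline's dirty bits as processed: clear them for this
 * pipeline, but remember them for the other pipelines, which have not yet
 * seen the changes.
 */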
static inline void
brw_pipeline_state_finished(struct brw_context *brw,
                            enum brw_pipeline pipeline)
{
   /* Save all dirty state into the other pipelines */
   for (unsigned i = 0; i < BRW_NUM_PIPELINES; i++) {
      if (i != pipeline) {
         brw->state.pipelines[i].mesa |= brw->NewGLState;
         brw->state.pipelines[i].brw |= brw->ctx.NewDriverState;
      } else {
         memset(&brw->state.pipelines[i], 0, sizeof(struct brw_state_flags));
      }
   }

   brw->NewGLState = 0;
   brw->ctx.NewDriverState = 0ull;
}

/**
 * Clear dirty bits to account for the fact that the state emitted by
 * brw_upload_render_state() has been committed to the hardware.  This is a
 * separate call from brw_upload_render_state() because it's possible that
 * after the call to brw_upload_render_state(), we will discover that we've
 * run out of aperture space, and need to rewind the batch buffer to the
 * state it had before the brw_upload_render_state() call.
 */
void
brw_render_state_finished(struct brw_context *brw)
{
   brw_pipeline_state_finished(brw, BRW_RENDER_PIPELINE);
}

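/* Compute-pipeline counterpart of brw_upload_render_state(). */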
void
brw_upload_compute_state(struct brw_context *brw)
{
   brw_upload_pipeline_state(brw, BRW_COMPUTE_PIPELINE);
}

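/* Compute-pipeline counterpart of brw_render_state_finished(). */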
void
brw_compute_state_finished(struct brw_context *brw)
{
   brw_pipeline_state_finished(brw, BRW_COMPUTE_PIPELINE);
}