/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */


#include "brw_context.h"
#include "brw_defines.h"
#include "brw_state.h"
#include "brw_program.h"
#include "drivers/common/meta.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "brw_vs.h"
#include "brw_ff_gs.h"
#include "brw_gs.h"
#include "brw_wm.h"
#include "brw_cs.h"
#include "main/framebuffer.h"

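/* Switch between mid-object and mid-buffer preemption by programming the
 * replay mode in CS_CHICKEN1.  Only valid on Gen9+; returns early if the
 * requested mode is already in effect.
 */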
void
brw_enable_obj_preemption(struct brw_context *brw, bool enable)
{
   ASSERTED const struct gen_device_info *devinfo = &brw->screen->devinfo;
   assert(devinfo->gen >= 9);

   if (enable == brw->object_preemption)
      return;

   /* A fixed function pipe flush is required before modifying this field */
   brw_emit_end_of_pipe_sync(brw, PIPE_CONTROL_RENDER_TARGET_FLUSH);

   bool replay_mode = enable ?
      GEN9_REPLAY_MODE_MIDOBJECT : GEN9_REPLAY_MODE_MIDBUFFER;

   /* enable object level preemption */
   brw_load_register_imm32(brw, CS_CHICKEN1,
                           replay_mode | GEN9_REPLAY_MODE_MASK);

   brw->object_preemption = enable;
}

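/* Upload GPU state that stays constant for the lifetime of the context.
 * Requires a hardware context; platforms without one program the
 * equivalent state through the regular state atoms instead.
 */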
static void
brw_upload_initial_gpu_state(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct brw_compiler *compiler = brw->screen->compiler;

   /* On platforms with hardware contexts, we can set our initial GPU state
    * right away rather than doing it via state atoms.  This saves a small
    * amount of overhead on every draw call.
    */
   if (!brw->hw_ctx)
      return;

   if (devinfo->gen == 6)
      brw_emit_post_sync_nonzero_flush(brw);

   brw_upload_invariant_state(brw);

   if (devinfo->gen == 11) {
      /* The default value of bit 5 "Headerless Message for Pre-emptable
       * Contexts" in the SAMPLER MODE register is 0, which means headerless
       * sampler messages are not allowed for pre-emptable contexts.  Set
       * bit 5 to 1 to allow them.
       */
      brw_load_register_imm32(brw, GEN11_SAMPLER_MODE,
                              HEADERLESS_MESSAGE_FOR_PREEMPTABLE_CONTEXTS_MASK |
                              HEADERLESS_MESSAGE_FOR_PREEMPTABLE_CONTEXTS);

      /* Bit 1 "Enabled Texel Offset Precision Fix" must be set in the
       * HALF_SLICE_CHICKEN7 register.
       */
      brw_load_register_imm32(brw, HALF_SLICE_CHICKEN7,
                              TEXEL_OFFSET_FIX_MASK |
                              TEXEL_OFFSET_FIX_ENABLE);

      /* WA_1406697149: Bit 9 "Error Detection Behavior Control" must be set
       * in the L3CNTLREG register.  The default setting of the bit is not
       * the desirable behavior.
       */
      brw_load_register_imm32(brw, GEN8_L3CNTLREG,
                              GEN8_L3CNTLREG_EDBC_NO_HANG);

      /* WaEnableStateCacheRedirectToCS:icl */
      brw_load_register_imm32(brw, SLICE_COMMON_ECO_CHICKEN1,
                              GEN11_STATE_CACHE_REDIRECT_TO_CS_SECTION_ENABLE |
                              REG_MASK(GEN11_STATE_CACHE_REDIRECT_TO_CS_SECTION_ENABLE));
   }

   /* The hardware specification recommends disabling repacking for
    * compatibility with the decompression mechanism in the display
    * controller.
    */
   if (devinfo->disable_ccs_repack) {
      brw_load_register_imm32(brw, GEN7_CACHE_MODE_0,
                              GEN11_DISABLE_REPACKING_FOR_COMPRESSION |
                              REG_MASK(GEN11_DISABLE_REPACKING_FOR_COMPRESSION));
   }

   if (devinfo->gen == 9) {
      /* Recommended optimizations for Victim Cache eviction and floating
       * point blending.
       */
      brw_load_register_imm32(brw, GEN7_CACHE_MODE_1,
                              REG_MASK(GEN9_FLOAT_BLEND_OPTIMIZATION_ENABLE) |
                              REG_MASK(GEN9_PARTIAL_RESOLVE_DISABLE_IN_VC) |
                              GEN9_FLOAT_BLEND_OPTIMIZATION_ENABLE |
                              GEN9_PARTIAL_RESOLVE_DISABLE_IN_VC);
   }

   if (devinfo->gen >= 8) {
      gen8_emit_3dstate_sample_pattern(brw);

      BEGIN_BATCH(5);
      OUT_BATCH(_3DSTATE_WM_HZ_OP << 16 | (5 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();

      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_WM_CHROMAKEY << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* Set the "CONSTANT_BUFFER Address Offset Disable" bit, so
    * 3DSTATE_CONSTANT_XS buffer 0 is an absolute address.
    *
    * This is only safe on kernels with context isolation support.
    */
   if (!compiler->constant_buffer_0_is_relative) {
      if (devinfo->gen >= 9) {
         BEGIN_BATCH(3);
         OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
         OUT_BATCH(CS_DEBUG_MODE2);
         OUT_BATCH(REG_MASK(CSDBG2_CONSTANT_BUFFER_ADDRESS_OFFSET_DISABLE) |
                   CSDBG2_CONSTANT_BUFFER_ADDRESS_OFFSET_DISABLE);
         ADVANCE_BATCH();
      } else if (devinfo->gen == 8) {
         BEGIN_BATCH(3);
         OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
         OUT_BATCH(INSTPM);
         OUT_BATCH(REG_MASK(INSTPM_CONSTANT_BUFFER_ADDRESS_OFFSET_DISABLE) |
                   INSTPM_CONSTANT_BUFFER_ADDRESS_OFFSET_DISABLE);
         ADVANCE_BATCH();
      }
   }

   brw->object_preemption = false;

   if (devinfo->gen >= 10)
      brw_enable_obj_preemption(brw, true);
}

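/* Return the state atom list for the given pipeline (render or compute). */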
static inline const struct brw_tracked_state *
brw_get_pipeline_atoms(struct brw_context *brw,
                       enum brw_pipeline pipeline)
{
   switch (pipeline) {
   case BRW_RENDER_PIPELINE:
      return brw->render_atoms;
   case BRW_COMPUTE_PIPELINE:
      return brw->compute_atoms;
   default:
      STATIC_ASSERT(BRW_NUM_PIPELINES == 2);
      unreachable("Unsupported pipeline");
      return NULL;
   }
}

void
brw_copy_pipeline_atoms(struct brw_context *brw,
                        enum brw_pipeline pipeline,
                        const struct brw_tracked_state **atoms,
                        int num_atoms)
{
   /* This is to work around brw_context::atoms being declared const.  We
    * want it to be const, but it needs to be initialized somehow!
    */
   struct brw_tracked_state *context_atoms =
      (struct brw_tracked_state *) brw_get_pipeline_atoms(brw, pipeline);

   for (int i = 0; i < num_atoms; i++) {
      context_atoms[i] = *atoms[i];
      assert(context_atoms[i].dirty.mesa | context_atoms[i].dirty.brw);
      assert(context_atoms[i].emit);
   }

   brw->num_atoms[pipeline] = num_atoms;
}

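/* One-time state setup: build the per-generation atom lists, upload the
 * initial GPU state, and flag all state dirty so the first draw re-emits
 * everything.
 */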
void brw_init_state( struct brw_context *brw )
{
   struct gl_context *ctx = &brw->ctx;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* Force the first brw_select_pipeline to emit pipeline select */
   brw->last_pipeline = BRW_NUM_PIPELINES;

   brw_init_caches(brw);

   if (devinfo->gen >= 11)
      gen11_init_atoms(brw);
   else if (devinfo->gen >= 10)
      gen10_init_atoms(brw);
   else if (devinfo->gen >= 9)
      gen9_init_atoms(brw);
   else if (devinfo->gen >= 8)
      gen8_init_atoms(brw);
   else if (devinfo->is_haswell)
      gen75_init_atoms(brw);
   else if (devinfo->gen >= 7)
      gen7_init_atoms(brw);
   else if (devinfo->gen >= 6)
      gen6_init_atoms(brw);
   else if (devinfo->gen >= 5)
      gen5_init_atoms(brw);
   else if (devinfo->is_g4x)
      gen45_init_atoms(brw);
   else
      gen4_init_atoms(brw);

   brw_upload_initial_gpu_state(brw);

   brw->NewGLState = ~0;
   brw->ctx.NewDriverState = ~0ull;

   /* ~0 is a nonsensical value which won't match anything we program, so
    * the programming will take effect on the first time around.
    */
   brw->pma_stall_bits = ~0;

   /* Make sure that brw->ctx.NewDriverState has enough bits to hold all
    * possible dirty flags.
    */
   STATIC_ASSERT(BRW_NUM_STATE_BITS <= 8 * sizeof(brw->ctx.NewDriverState));

   ctx->DriverFlags.NewTransformFeedback = BRW_NEW_TRANSFORM_FEEDBACK;
   ctx->DriverFlags.NewTransformFeedbackProg = BRW_NEW_TRANSFORM_FEEDBACK;
   ctx->DriverFlags.NewRasterizerDiscard = BRW_NEW_RASTERIZER_DISCARD;
   ctx->DriverFlags.NewUniformBuffer = BRW_NEW_UNIFORM_BUFFER;
   ctx->DriverFlags.NewShaderStorageBuffer = BRW_NEW_UNIFORM_BUFFER;
   ctx->DriverFlags.NewTextureBuffer = BRW_NEW_TEXTURE_BUFFER;
   ctx->DriverFlags.NewAtomicBuffer = BRW_NEW_UNIFORM_BUFFER;
   ctx->DriverFlags.NewImageUnits = BRW_NEW_IMAGE_UNITS;
   ctx->DriverFlags.NewDefaultTessLevels = BRW_NEW_DEFAULT_TESS_LEVELS;
   ctx->DriverFlags.NewIntelConservativeRasterization = BRW_NEW_CONSERVATIVE_RASTERIZATION;
}


void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
}

/***********************************************************************
 */

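/* Helpers for combining and testing brw_state_flags dirty-bit sets. */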
static bool
check_state(const struct brw_state_flags *a, const struct brw_state_flags *b)
{
   return ((a->mesa & b->mesa) | (a->brw & b->brw)) != 0;
}

static void accumulate_state( struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   a->mesa |= b->mesa;
   a->brw |= b->brw;
}


static void xor_states( struct brw_state_flags *result,
                        const struct brw_state_flags *a,
                        const struct brw_state_flags *b )
{
   result->mesa = a->mesa ^ b->mesa;
   result->brw = a->brw ^ b->brw;
}

struct dirty_bit_map {
   uint64_t bit;
   char *name;
   uint32_t count;
};

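/* Tables mapping each Mesa and BRW dirty bit to its name and a hit counter,
 * used for the INTEL_DEBUG=state statistics printed below.
 */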
#define DEFINE_BIT(name) {name, #name, 0}

static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE_OBJECT),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_TEXTURE_STATE),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_CURRENT_ATTRIB),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   DEFINE_BIT(_NEW_FRAG_CLAMP),
   /* Avoid sign extension problems. */
   {(unsigned) _NEW_VARYING_VP_INPUTS, "_NEW_VARYING_VP_INPUTS", 0},
   {0, 0, 0}
};

static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_FS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_BLORP_BLIT_PROG_DATA),
   DEFINE_BIT(BRW_NEW_SF_PROG_DATA),
   DEFINE_BIT(BRW_NEW_VS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_FF_GS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_GS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_TCS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_TES_PROG_DATA),
   DEFINE_BIT(BRW_NEW_CLIP_PROG_DATA),
   DEFINE_BIT(BRW_NEW_CS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_GEOMETRY_PROGRAM),
   DEFINE_BIT(BRW_NEW_TESS_PROGRAMS),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PATCH_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_SURFACES),
   DEFINE_BIT(BRW_NEW_BINDING_TABLE_POINTERS),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_DEFAULT_TESS_LEVELS),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_TCS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_TES_CONSTBUF),
   DEFINE_BIT(BRW_NEW_GS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_PROGRAM_CACHE),
   DEFINE_BIT(BRW_NEW_STATE_BASE_ADDRESS),
   DEFINE_BIT(BRW_NEW_VUE_MAP_GEOM_OUT),
   DEFINE_BIT(BRW_NEW_TRANSFORM_FEEDBACK),
   DEFINE_BIT(BRW_NEW_RASTERIZER_DISCARD),
   DEFINE_BIT(BRW_NEW_STATS_WM),
   DEFINE_BIT(BRW_NEW_UNIFORM_BUFFER),
   DEFINE_BIT(BRW_NEW_IMAGE_UNITS),
   DEFINE_BIT(BRW_NEW_META_IN_PROGRESS),
   DEFINE_BIT(BRW_NEW_PUSH_CONSTANT_ALLOCATION),
   DEFINE_BIT(BRW_NEW_NUM_SAMPLES),
   DEFINE_BIT(BRW_NEW_TEXTURE_BUFFER),
   DEFINE_BIT(BRW_NEW_GEN4_UNIT_STATE),
   DEFINE_BIT(BRW_NEW_CC_VP),
   DEFINE_BIT(BRW_NEW_SF_VP),
   DEFINE_BIT(BRW_NEW_CLIP_VP),
   DEFINE_BIT(BRW_NEW_SAMPLER_STATE_TABLE),
   DEFINE_BIT(BRW_NEW_VS_ATTRIB_WORKAROUNDS),
   DEFINE_BIT(BRW_NEW_COMPUTE_PROGRAM),
   DEFINE_BIT(BRW_NEW_CS_WORK_GROUPS),
   DEFINE_BIT(BRW_NEW_URB_SIZE),
   DEFINE_BIT(BRW_NEW_CC_STATE),
   DEFINE_BIT(BRW_NEW_BLORP),
   DEFINE_BIT(BRW_NEW_VIEWPORT_COUNT),
   DEFINE_BIT(BRW_NEW_CONSERVATIVE_RASTERIZATION),
   DEFINE_BIT(BRW_NEW_DRAW_CALL),
   DEFINE_BIT(BRW_NEW_AUX_STATE),
   {0, 0, 0}
};

static void
brw_update_dirty_count(struct dirty_bit_map *bit_map, uint64_t bits)
{
   for (int i = 0; bit_map[i].bit != 0; i++) {
      if (bit_map[i].bit & bits)
         bit_map[i].count++;
   }
}

static void
brw_print_dirty_count(struct dirty_bit_map *bit_map)
{
   for (int i = 0; bit_map[i].bit != 0; i++) {
      if (bit_map[i].count > 1) {
         fprintf(stderr, "0x%016"PRIx64": %12d (%s)\n",
                 bit_map[i].bit, bit_map[i].count, bit_map[i].name);
      }
   }
}

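/* The tessellation stages are keyed off the evaluation shader: if no TES is
 * bound, both the TCS and TES program data are cleared.
 */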
static inline void
brw_upload_tess_programs(struct brw_context *brw)
{
   if (brw->programs[MESA_SHADER_TESS_EVAL]) {
      brw_upload_tcs_prog(brw);
      brw_upload_tes_prog(brw);
   } else {
      brw->tcs.base.prog_data = NULL;
      brw->tes.base.prog_data = NULL;
   }
}

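/* Upload the programs needed for the given pipeline and update derived
 * state that depends on the last enabled pre-rasterization stage, such as
 * the geometry-out VUE map and the viewport count.
 */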
static inline void
brw_upload_programs(struct brw_context *brw,
                    enum brw_pipeline pipeline)
{
   struct gl_context *ctx = &brw->ctx;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (pipeline == BRW_RENDER_PIPELINE) {
      brw_upload_vs_prog(brw);
      brw_upload_tess_programs(brw);

      if (brw->programs[MESA_SHADER_GEOMETRY]) {
         brw_upload_gs_prog(brw);
      } else {
         brw->gs.base.prog_data = NULL;
         if (devinfo->gen < 7)
            brw_upload_ff_gs_prog(brw);
      }

      /* Update the VUE map for data exiting the GS stage of the pipeline.
       * This comes from the last enabled shader stage.
       */
      GLbitfield64 old_slots = brw->vue_map_geom_out.slots_valid;
      bool old_separate = brw->vue_map_geom_out.separate;
      struct brw_vue_prog_data *vue_prog_data;
      if (brw->programs[MESA_SHADER_GEOMETRY])
         vue_prog_data = brw_vue_prog_data(brw->gs.base.prog_data);
      else if (brw->programs[MESA_SHADER_TESS_EVAL])
         vue_prog_data = brw_vue_prog_data(brw->tes.base.prog_data);
      else
         vue_prog_data = brw_vue_prog_data(brw->vs.base.prog_data);

      brw->vue_map_geom_out = vue_prog_data->vue_map;

      /* If the layout has changed, signal BRW_NEW_VUE_MAP_GEOM_OUT. */
      if (old_slots != brw->vue_map_geom_out.slots_valid ||
          old_separate != brw->vue_map_geom_out.separate)
         brw->ctx.NewDriverState |= BRW_NEW_VUE_MAP_GEOM_OUT;

      if ((old_slots ^ brw->vue_map_geom_out.slots_valid) &
          VARYING_BIT_VIEWPORT) {
         ctx->NewDriverState |= BRW_NEW_VIEWPORT_COUNT;
         brw->clip.viewport_count =
            (brw->vue_map_geom_out.slots_valid & VARYING_BIT_VIEWPORT) ?
            ctx->Const.MaxViewports : 1;
      }

      brw_upload_wm_prog(brw);

      if (devinfo->gen < 6) {
         brw_upload_clip_prog(brw);
         brw_upload_sf_prog(brw);
      }

      brw_disk_cache_write_render_programs(brw);
   } else if (pipeline == BRW_COMPUTE_PIPELINE) {
      brw_upload_cs_prog(brw);
      brw_disk_cache_write_compute_program(brw);
   }
}

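/* Fold any dirty bits newly flagged on the context into the accumulated
 * state for this upload.
 */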
static inline void
merge_ctx_state(struct brw_context *brw,
                struct brw_state_flags *state)
{
   state->mesa |= brw->NewGLState;
   state->brw |= brw->ctx.NewDriverState;
}

static ALWAYS_INLINE void
check_and_emit_atom(struct brw_context *brw,
                    struct brw_state_flags *state,
                    const struct brw_tracked_state *atom)
{
   if (check_state(state, &atom->dirty)) {
      atom->emit(brw);
      merge_ctx_state(brw, state);
   }
}

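/* Upload all dirty state for the given pipeline: refresh the bound programs,
 * then walk the pipeline's atom list and emit every atom whose dirty bits
 * intersect the accumulated dirty state.
 */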
static inline void
brw_upload_pipeline_state(struct brw_context *brw,
                          enum brw_pipeline pipeline)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   int i;
   static int dirty_count = 0;
   struct brw_state_flags state = brw->state.pipelines[pipeline];
   const unsigned fb_samples =
      MAX2(_mesa_geometric_samples(ctx->DrawBuffer), 1);

   brw_select_pipeline(brw, pipeline);

   if (pipeline == BRW_RENDER_PIPELINE && brw->current_hash_scale != 1)
      brw_emit_hashing_mode(brw, UINT_MAX, UINT_MAX, 1);

   if (unlikely(INTEL_DEBUG & DEBUG_REEMIT)) {
      /* Always re-emit all state. */
      brw->NewGLState = ~0;
      ctx->NewDriverState = ~0ull;
   }

   if (pipeline == BRW_RENDER_PIPELINE) {
      if (brw->programs[MESA_SHADER_FRAGMENT] !=
          ctx->FragmentProgram._Current) {
         brw->programs[MESA_SHADER_FRAGMENT] = ctx->FragmentProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_FRAGMENT_PROGRAM;
      }

      if (brw->programs[MESA_SHADER_TESS_EVAL] !=
          ctx->TessEvalProgram._Current) {
         brw->programs[MESA_SHADER_TESS_EVAL] = ctx->TessEvalProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_TESS_PROGRAMS;
      }

      if (brw->programs[MESA_SHADER_TESS_CTRL] !=
          ctx->TessCtrlProgram._Current) {
         brw->programs[MESA_SHADER_TESS_CTRL] = ctx->TessCtrlProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_TESS_PROGRAMS;
      }

      if (brw->programs[MESA_SHADER_GEOMETRY] !=
          ctx->GeometryProgram._Current) {
         brw->programs[MESA_SHADER_GEOMETRY] = ctx->GeometryProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_GEOMETRY_PROGRAM;
      }

      if (brw->programs[MESA_SHADER_VERTEX] != ctx->VertexProgram._Current) {
         brw->programs[MESA_SHADER_VERTEX] = ctx->VertexProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_VERTEX_PROGRAM;
      }
   }

   if (brw->programs[MESA_SHADER_COMPUTE] != ctx->ComputeProgram._Current) {
      brw->programs[MESA_SHADER_COMPUTE] = ctx->ComputeProgram._Current;
      brw->ctx.NewDriverState |= BRW_NEW_COMPUTE_PROGRAM;
   }

   if (brw->meta_in_progress != _mesa_meta_in_progress(ctx)) {
      brw->meta_in_progress = _mesa_meta_in_progress(ctx);
      brw->ctx.NewDriverState |= BRW_NEW_META_IN_PROGRESS;
   }

   if (brw->num_samples != fb_samples) {
      brw->num_samples = fb_samples;
      brw->ctx.NewDriverState |= BRW_NEW_NUM_SAMPLES;
   }

   /* Exit early if no state is flagged as dirty */
   merge_ctx_state(brw, &state);
   if ((state.mesa | state.brw) == 0)
      return;

   /* Emit Sandybridge workaround flushes on every primitive, for safety. */
   if (devinfo->gen == 6)
      brw_emit_post_sync_nonzero_flush(brw);

   brw_upload_programs(brw, pipeline);
   merge_ctx_state(brw, &state);

   brw_upload_state_base_address(brw);

   const struct brw_tracked_state *atoms =
      brw_get_pipeline_atoms(brw, pipeline);
   const int num_atoms = brw->num_atoms[pipeline];

   if (unlikely(INTEL_DEBUG)) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = state;

      for (i = 0; i < num_atoms; i++) {
         const struct brw_tracked_state *atom = &atoms[i];
         struct brw_state_flags generated;

         check_and_emit_atom(brw, &state, atom);

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ state)
          * if (examined & generated)
          *    fail;
          */
         xor_states(&generated, &prev, &state);
         assert(!check_state(&examined, &generated));
         prev = state;
      }
   }
   else {
      for (i = 0; i < num_atoms; i++) {
         const struct brw_tracked_state *atom = &atoms[i];

         check_and_emit_atom(brw, &state, atom);
      }
   }

   if (unlikely(INTEL_DEBUG & DEBUG_STATE)) {
      STATIC_ASSERT(ARRAY_SIZE(brw_bits) == BRW_NUM_STATE_BITS + 1);

      brw_update_dirty_count(mesa_bits, state.mesa);
      brw_update_dirty_count(brw_bits, state.brw);
      if (dirty_count++ % 1000 == 0) {
         brw_print_dirty_count(mesa_bits);
         brw_print_dirty_count(brw_bits);
         fprintf(stderr, "\n");
      }
   }
}

/***********************************************************************
 * Emit all state:
 */
void brw_upload_render_state(struct brw_context *brw)
{
   brw_upload_pipeline_state(brw, BRW_RENDER_PIPELINE);
}

static inline void
brw_pipeline_state_finished(struct brw_context *brw,
                            enum brw_pipeline pipeline)
{
   /* Save all dirty state into the other pipelines */
   for (unsigned i = 0; i < BRW_NUM_PIPELINES; i++) {
      if (i != pipeline) {
         brw->state.pipelines[i].mesa |= brw->NewGLState;
         brw->state.pipelines[i].brw |= brw->ctx.NewDriverState;
      } else {
         memset(&brw->state.pipelines[i], 0, sizeof(struct brw_state_flags));
      }
   }

   brw->NewGLState = 0;
   brw->ctx.NewDriverState = 0ull;
}

/**
 * Clear dirty bits to account for the fact that the state emitted by
 * brw_upload_render_state() has been committed to the hardware.  This is a
 * separate call from brw_upload_render_state() because it's possible that
 * after the call to brw_upload_render_state(), we will discover that we've
 * run out of aperture space, and need to rewind the batch buffer to the
 * state it had before the brw_upload_render_state() call.
 */
void
brw_render_state_finished(struct brw_context *brw)
{
   brw_pipeline_state_finished(brw, BRW_RENDER_PIPELINE);
}

void
brw_upload_compute_state(struct brw_context *brw)
{
   brw_upload_pipeline_state(brw, BRW_COMPUTE_PIPELINE);
}

void
brw_compute_state_finished(struct brw_context *brw)
{
   brw_pipeline_state_finished(brw, BRW_COMPUTE_PIPELINE);
}