i965: Enable the OES_copy_image extension on Gen 7 GPUs
[mesa.git] / src/mesa/drivers/dri/i965/brw_state_upload.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keithw@vmware.com>
 */



#include "brw_context.h"
#include "brw_defines.h"
#include "brw_state.h"
#include "brw_program.h"
#include "drivers/common/meta.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "brw_vs.h"
#include "brw_ff_gs.h"
#include "brw_gs.h"
#include "brw_wm.h"
#include "brw_cs.h"
#include "main/framebuffer.h"

void
brw_enable_obj_preemption(struct brw_context *brw, bool enable)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   assert(devinfo->gen >= 9);

   if (enable == brw->object_preemption)
      return;

   /* A fixed function pipe flush is required before modifying this field */
   brw_emit_end_of_pipe_sync(brw, PIPE_CONTROL_RENDER_TARGET_FLUSH);

   bool replay_mode = enable ?
      GEN9_REPLAY_MODE_MIDOBJECT : GEN9_REPLAY_MODE_MIDBUFFER;

   /* enable object level preemption */
   brw_load_register_imm32(brw, CS_CHICKEN1,
                           replay_mode | GEN9_REPLAY_MODE_MASK);

   brw->object_preemption = enable;
}
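
/* A note on the write above: CS_CHICKEN1, like the other chicken/debug
 * registers poked in this file, treats bits 31:16 as a per-bit write
 * enable for bits 15:0. A minimal sketch of the idiom, assuming
 * REG_MASK() from brw_defines.h is simply a 16-bit shift:
 *
 *    #define REG_MASK(field) ((field) << 16)
 *
 *    // Update only FIELD_BITS; every unmasked bit keeps its old value.
 *    brw_load_register_imm32(brw, REG, REG_MASK(FIELD_BITS) | value);
 *
 * GEN9_REPLAY_MODE_MASK is assumed to be the pre-shifted write enable
 * for the replay-mode field, which is why it is OR'd in unconditionally.
 */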

static void
brw_upload_initial_gpu_state(struct brw_context *brw)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   const struct brw_compiler *compiler = brw->screen->compiler;

   /* On platforms with hardware contexts, we can set our initial GPU state
    * right away rather than doing it via state atoms. This saves a small
    * amount of overhead on every draw call.
    */
   if (!brw->hw_ctx)
      return;

   if (devinfo->gen == 6)
      brw_emit_post_sync_nonzero_flush(brw);

   brw_upload_invariant_state(brw);

   if (devinfo->gen == 11) {
      /* The default behavior of bit 5 "Headerless Message for Pre-emptable
       * Contexts" in the SAMPLER MODE register is 0, which means headerless
       * sampler messages are not allowed for pre-emptable contexts. Set
       * bit 5 to 1 to allow them.
       */
      brw_load_register_imm32(brw, GEN11_SAMPLER_MODE,
                              HEADERLESS_MESSAGE_FOR_PREEMPTABLE_CONTEXTS_MASK |
                              HEADERLESS_MESSAGE_FOR_PREEMPTABLE_CONTEXTS);

      /* Bit 1 "Enabled Texel Offset Precision Fix" must be set in the
       * HALF_SLICE_CHICKEN7 register.
       */
      brw_load_register_imm32(brw, HALF_SLICE_CHICKEN7,
                              TEXEL_OFFSET_FIX_MASK |
                              TEXEL_OFFSET_FIX_ENABLE);

      /* WA_1406697149: Bit 9 "Error Detection Behavior Control" must be set
       * in the L3CNTLREG register. The default setting of the bit is not
       * the desired behavior.
       */
      brw_load_register_imm32(brw, GEN8_L3CNTLREG,
                              GEN8_L3CNTLREG_EDBC_NO_HANG);
   }

   if (devinfo->gen == 10 || devinfo->gen == 11) {
      /* From the gen10 workaround table in the h/w specs:
       *
       *    "On 3DSTATE_3D_MODE, driver must always program bits 31:16 of DW1
       *     a value of 0xFFFF"
       *
       * This means that we end up setting the entire 3D_MODE state. Bits
       * in this register control things such as slice hashing and we want
       * the default values of zero at the moment.
       */
      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_3D_MODE << 16 | (2 - 2));
      OUT_BATCH(0xFFFF << 16);
      ADVANCE_BATCH();
   }
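
   /* The packet above follows the usual command-header encoding used
    * throughout this file: the opcode sits in the high bits and the low
    * bits hold the packet length in DWords minus two. A hedged sketch of
    * the pattern (_3DSTATE_FOO is a placeholder, not a real opcode):
    *
    *    BEGIN_BATCH(n);                           // reserve n DWords
    *    OUT_BATCH(_3DSTATE_FOO << 16 | (n - 2));  // header: opcode + length
    *    OUT_BATCH(...);                           // n - 1 payload DWords
    *    ADVANCE_BATCH();
    *
    * which is why the literal headers read (2 - 2) and (5 - 2) rather
    * than 0 and 3.
    */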

   if (devinfo->gen == 9) {
      /* Recommended optimizations for Victim Cache eviction and floating
       * point blending.
       */
      brw_load_register_imm32(brw, GEN7_CACHE_MODE_1,
                              REG_MASK(GEN9_FLOAT_BLEND_OPTIMIZATION_ENABLE) |
                              REG_MASK(GEN9_PARTIAL_RESOLVE_DISABLE_IN_VC) |
                              GEN9_FLOAT_BLEND_OPTIMIZATION_ENABLE |
                              GEN9_PARTIAL_RESOLVE_DISABLE_IN_VC);

      if (gen_device_info_is_9lp(devinfo)) {
         brw_load_register_imm32(brw, GEN7_GT_MODE,
                                 GEN9_SUBSLICE_HASHING_MASK_BITS |
                                 GEN9_SUBSLICE_HASHING_16x16);
      }
   }

   if (devinfo->gen >= 8) {
      gen8_emit_3dstate_sample_pattern(brw);

      BEGIN_BATCH(5);
      OUT_BATCH(_3DSTATE_WM_HZ_OP << 16 | (5 - 2));
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      OUT_BATCH(0);
      ADVANCE_BATCH();

      BEGIN_BATCH(2);
      OUT_BATCH(_3DSTATE_WM_CHROMAKEY << 16 | (2 - 2));
      OUT_BATCH(0);
      ADVANCE_BATCH();
   }

   /* Set the "CONSTANT_BUFFER Address Offset Disable" bit, so
    * 3DSTATE_CONSTANT_XS buffer 0 is an absolute address.
    *
    * This is only safe on kernels with context isolation support.
    */
   if (!compiler->constant_buffer_0_is_relative) {
      if (devinfo->gen >= 9) {
         BEGIN_BATCH(3);
         OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
         OUT_BATCH(CS_DEBUG_MODE2);
         OUT_BATCH(REG_MASK(CSDBG2_CONSTANT_BUFFER_ADDRESS_OFFSET_DISABLE) |
                   CSDBG2_CONSTANT_BUFFER_ADDRESS_OFFSET_DISABLE);
         ADVANCE_BATCH();
      } else if (devinfo->gen == 8) {
         BEGIN_BATCH(3);
         OUT_BATCH(MI_LOAD_REGISTER_IMM | (3 - 2));
         OUT_BATCH(INSTPM);
         OUT_BATCH(REG_MASK(INSTPM_CONSTANT_BUFFER_ADDRESS_OFFSET_DISABLE) |
                   INSTPM_CONSTANT_BUFFER_ADDRESS_OFFSET_DISABLE);
         ADVANCE_BATCH();
      }
   }

   brw->object_preemption = false;

   if (devinfo->gen >= 10)
      brw_enable_obj_preemption(brw, true);
}

static inline const struct brw_tracked_state *
brw_get_pipeline_atoms(struct brw_context *brw,
                       enum brw_pipeline pipeline)
{
   switch (pipeline) {
   case BRW_RENDER_PIPELINE:
      return brw->render_atoms;
   case BRW_COMPUTE_PIPELINE:
      return brw->compute_atoms;
   default:
      STATIC_ASSERT(BRW_NUM_PIPELINES == 2);
      unreachable("Unsupported pipeline");
      return NULL;
   }
}

void
brw_copy_pipeline_atoms(struct brw_context *brw,
                        enum brw_pipeline pipeline,
                        const struct brw_tracked_state **atoms,
                        int num_atoms)
{
   /* This is to work around brw_context::atoms being declared const. We want
    * it to be const, but it needs to be initialized somehow!
    */
   struct brw_tracked_state *context_atoms =
      (struct brw_tracked_state *) brw_get_pipeline_atoms(brw, pipeline);

   for (int i = 0; i < num_atoms; i++) {
      context_atoms[i] = *atoms[i];
      assert(context_atoms[i].dirty.mesa | context_atoms[i].dirty.brw);
      assert(context_atoms[i].emit);
   }

   brw->num_atoms[pipeline] = num_atoms;
}
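
/* A hypothetical caller, sketching how the per-gen genX_init_atoms()
 * functions are expected to use this (the atom list is illustrative,
 * not the actual contents of any genX_state_upload.c):
 *
 *    static const struct brw_tracked_state *render_atoms[] = {
 *       &brw_vs_prog,
 *       &brw_wm_prog,
 *       // ... remaining atoms, in emission order
 *    };
 *
 *    brw_copy_pipeline_atoms(brw, BRW_RENDER_PIPELINE,
 *                            render_atoms, ARRAY_SIZE(render_atoms));
 */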

void brw_init_state( struct brw_context *brw )
{
   struct gl_context *ctx = &brw->ctx;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   /* Force the first brw_select_pipeline to emit a pipeline select */
   brw->last_pipeline = BRW_NUM_PIPELINES;

   brw_init_caches(brw);

   if (devinfo->gen >= 11)
      gen11_init_atoms(brw);
   else if (devinfo->gen >= 10)
      gen10_init_atoms(brw);
   else if (devinfo->gen >= 9)
      gen9_init_atoms(brw);
   else if (devinfo->gen >= 8)
      gen8_init_atoms(brw);
   else if (devinfo->is_haswell)
      gen75_init_atoms(brw);
   else if (devinfo->gen >= 7)
      gen7_init_atoms(brw);
   else if (devinfo->gen >= 6)
      gen6_init_atoms(brw);
   else if (devinfo->gen >= 5)
      gen5_init_atoms(brw);
   else if (devinfo->is_g4x)
      gen45_init_atoms(brw);
   else
      gen4_init_atoms(brw);

   brw_upload_initial_gpu_state(brw);

   brw->NewGLState = ~0;
   brw->ctx.NewDriverState = ~0ull;

   /* ~0 is a nonsensical value which won't match anything we program, so
    * the programming will take effect on the first time around.
    */
   brw->pma_stall_bits = ~0;

   /* Make sure that brw->ctx.NewDriverState has enough bits to hold all
    * possible dirty flags.
    */
   STATIC_ASSERT(BRW_NUM_STATE_BITS <= 8 * sizeof(brw->ctx.NewDriverState));

   ctx->DriverFlags.NewTransformFeedback = BRW_NEW_TRANSFORM_FEEDBACK;
   ctx->DriverFlags.NewTransformFeedbackProg = BRW_NEW_TRANSFORM_FEEDBACK;
   ctx->DriverFlags.NewRasterizerDiscard = BRW_NEW_RASTERIZER_DISCARD;
   ctx->DriverFlags.NewUniformBuffer = BRW_NEW_UNIFORM_BUFFER;
   ctx->DriverFlags.NewShaderStorageBuffer = BRW_NEW_UNIFORM_BUFFER;
   ctx->DriverFlags.NewTextureBuffer = BRW_NEW_TEXTURE_BUFFER;
   ctx->DriverFlags.NewAtomicBuffer = BRW_NEW_UNIFORM_BUFFER;
   ctx->DriverFlags.NewImageUnits = BRW_NEW_IMAGE_UNITS;
   ctx->DriverFlags.NewDefaultTessLevels = BRW_NEW_DEFAULT_TESS_LEVELS;
   ctx->DriverFlags.NewIntelConservativeRasterization = BRW_NEW_CONSERVATIVE_RASTERIZATION;
}


void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
}

/***********************************************************************
 */

static bool
check_state(const struct brw_state_flags *a, const struct brw_state_flags *b)
{
   return ((a->mesa & b->mesa) | (a->brw & b->brw)) != 0;
}

static void accumulate_state( struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   a->mesa |= b->mesa;
   a->brw |= b->brw;
}


static void xor_states( struct brw_state_flags *result,
                        const struct brw_state_flags *a,
                        const struct brw_state_flags *b )
{
   result->mesa = a->mesa ^ b->mesa;
   result->brw = a->brw ^ b->brw;
}
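
/* These three helpers are the whole algebra used by
 * brw_upload_pipeline_state() below: check_state() tests whether two
 * flag sets intersect, accumulate_state() unions them, and xor_states()
 * yields the flags that changed between two snapshots. The debug path
 * combines them to assert that no atom sets a dirty flag that an
 * earlier atom in the list already consumed.
 */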

struct dirty_bit_map {
   uint64_t bit;
   char *name;
   uint32_t count;
};

#define DEFINE_BIT(name) {name, #name, 0}
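/* For example, DEFINE_BIT(_NEW_COLOR) expands to {_NEW_COLOR, "_NEW_COLOR", 0},
 * pairing each dirty bit with its printable name and a zeroed hit counter.
 */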

static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE_OBJECT),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_TEXTURE_STATE),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_CURRENT_ATTRIB),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   DEFINE_BIT(_NEW_FRAG_CLAMP),
   /* Avoid sign extension problems. */
   {(unsigned) _NEW_VARYING_VP_INPUTS, "_NEW_VARYING_VP_INPUTS", 0},
   {0, 0, 0}
};

static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_FS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_BLORP_BLIT_PROG_DATA),
   DEFINE_BIT(BRW_NEW_SF_PROG_DATA),
   DEFINE_BIT(BRW_NEW_VS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_FF_GS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_GS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_TCS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_TES_PROG_DATA),
   DEFINE_BIT(BRW_NEW_CLIP_PROG_DATA),
   DEFINE_BIT(BRW_NEW_CS_PROG_DATA),
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_GEOMETRY_PROGRAM),
   DEFINE_BIT(BRW_NEW_TESS_PROGRAMS),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PATCH_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_SURFACES),
   DEFINE_BIT(BRW_NEW_BINDING_TABLE_POINTERS),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_DEFAULT_TESS_LEVELS),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_TCS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_TES_CONSTBUF),
   DEFINE_BIT(BRW_NEW_GS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_PROGRAM_CACHE),
   DEFINE_BIT(BRW_NEW_STATE_BASE_ADDRESS),
   DEFINE_BIT(BRW_NEW_VUE_MAP_GEOM_OUT),
   DEFINE_BIT(BRW_NEW_TRANSFORM_FEEDBACK),
   DEFINE_BIT(BRW_NEW_RASTERIZER_DISCARD),
   DEFINE_BIT(BRW_NEW_STATS_WM),
   DEFINE_BIT(BRW_NEW_UNIFORM_BUFFER),
   DEFINE_BIT(BRW_NEW_IMAGE_UNITS),
   DEFINE_BIT(BRW_NEW_META_IN_PROGRESS),
   DEFINE_BIT(BRW_NEW_PUSH_CONSTANT_ALLOCATION),
   DEFINE_BIT(BRW_NEW_NUM_SAMPLES),
   DEFINE_BIT(BRW_NEW_TEXTURE_BUFFER),
   DEFINE_BIT(BRW_NEW_GEN4_UNIT_STATE),
   DEFINE_BIT(BRW_NEW_CC_VP),
   DEFINE_BIT(BRW_NEW_SF_VP),
   DEFINE_BIT(BRW_NEW_CLIP_VP),
   DEFINE_BIT(BRW_NEW_SAMPLER_STATE_TABLE),
   DEFINE_BIT(BRW_NEW_VS_ATTRIB_WORKAROUNDS),
   DEFINE_BIT(BRW_NEW_COMPUTE_PROGRAM),
   DEFINE_BIT(BRW_NEW_CS_WORK_GROUPS),
   DEFINE_BIT(BRW_NEW_URB_SIZE),
   DEFINE_BIT(BRW_NEW_CC_STATE),
   DEFINE_BIT(BRW_NEW_BLORP),
   DEFINE_BIT(BRW_NEW_VIEWPORT_COUNT),
   DEFINE_BIT(BRW_NEW_CONSERVATIVE_RASTERIZATION),
   DEFINE_BIT(BRW_NEW_DRAW_CALL),
   DEFINE_BIT(BRW_NEW_AUX_STATE),
   {0, 0, 0}
};

static void
brw_update_dirty_count(struct dirty_bit_map *bit_map, uint64_t bits)
{
   for (int i = 0; bit_map[i].bit != 0; i++) {
      if (bit_map[i].bit & bits)
         bit_map[i].count++;
   }
}

static void
brw_print_dirty_count(struct dirty_bit_map *bit_map)
{
   for (int i = 0; bit_map[i].bit != 0; i++) {
      if (bit_map[i].count > 1) {
         fprintf(stderr, "0x%016"PRIx64": %12d (%s)\n",
                 bit_map[i].bit, bit_map[i].count, bit_map[i].name);
      }
   }
}
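
/* With the format string above, a line of DEBUG_STATE output looks
 * roughly like this (the bit and count are illustrative):
 *
 *    0x0000000000020000:         1542 (BRW_NEW_BATCH)
 */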

static inline void
brw_upload_tess_programs(struct brw_context *brw)
{
   if (brw->programs[MESA_SHADER_TESS_EVAL]) {
      brw_upload_tcs_prog(brw);
      brw_upload_tes_prog(brw);
   } else {
      brw->tcs.base.prog_data = NULL;
      brw->tes.base.prog_data = NULL;
   }
}
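
/* Note that brw_upload_tcs_prog() runs whenever a TES is bound, even if
 * the application supplied no TCS of its own; the assumption here is
 * that the TCS upload path substitutes a passthrough control shader in
 * that case.
 */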

static inline void
brw_upload_programs(struct brw_context *brw,
                    enum brw_pipeline pipeline)
{
   struct gl_context *ctx = &brw->ctx;
   const struct gen_device_info *devinfo = &brw->screen->devinfo;

   if (pipeline == BRW_RENDER_PIPELINE) {
      brw_upload_vs_prog(brw);
      brw_upload_tess_programs(brw);

      if (brw->programs[MESA_SHADER_GEOMETRY]) {
         brw_upload_gs_prog(brw);
      } else {
         brw->gs.base.prog_data = NULL;
         if (devinfo->gen < 7)
            brw_upload_ff_gs_prog(brw);
      }

      /* Update the VUE map for data exiting the GS stage of the pipeline.
       * This comes from the last enabled shader stage.
       */
      GLbitfield64 old_slots = brw->vue_map_geom_out.slots_valid;
      bool old_separate = brw->vue_map_geom_out.separate;
      struct brw_vue_prog_data *vue_prog_data;
      if (brw->programs[MESA_SHADER_GEOMETRY])
         vue_prog_data = brw_vue_prog_data(brw->gs.base.prog_data);
      else if (brw->programs[MESA_SHADER_TESS_EVAL])
         vue_prog_data = brw_vue_prog_data(brw->tes.base.prog_data);
      else
         vue_prog_data = brw_vue_prog_data(brw->vs.base.prog_data);

      brw->vue_map_geom_out = vue_prog_data->vue_map;

      /* If the layout has changed, signal BRW_NEW_VUE_MAP_GEOM_OUT. */
      if (old_slots != brw->vue_map_geom_out.slots_valid ||
          old_separate != brw->vue_map_geom_out.separate)
         brw->ctx.NewDriverState |= BRW_NEW_VUE_MAP_GEOM_OUT;

      if ((old_slots ^ brw->vue_map_geom_out.slots_valid) &
          VARYING_BIT_VIEWPORT) {
         ctx->NewDriverState |= BRW_NEW_VIEWPORT_COUNT;
         brw->clip.viewport_count =
            (brw->vue_map_geom_out.slots_valid & VARYING_BIT_VIEWPORT) ?
            ctx->Const.MaxViewports : 1;
      }

      brw_upload_wm_prog(brw);

      if (devinfo->gen < 6) {
         brw_upload_clip_prog(brw);
         brw_upload_sf_prog(brw);
      }

      brw_disk_cache_write_render_programs(brw);
   } else if (pipeline == BRW_COMPUTE_PIPELINE) {
      brw_upload_cs_prog(brw);
      brw_disk_cache_write_compute_program(brw);
   }
}

static inline void
merge_ctx_state(struct brw_context *brw,
                struct brw_state_flags *state)
{
   state->mesa |= brw->NewGLState;
   state->brw |= brw->ctx.NewDriverState;
}

static ALWAYS_INLINE void
check_and_emit_atom(struct brw_context *brw,
                    struct brw_state_flags *state,
                    const struct brw_tracked_state *atom)
{
   if (check_state(state, &atom->dirty)) {
      atom->emit(brw);
      merge_ctx_state(brw, state);
   }
}
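
/* The merge_ctx_state() call after each emit matters: an atom's emit()
 * may itself flag new dirty state, and folding those flags back into
 * `state` lets later atoms in the same walk observe and react to them.
 */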

static inline void
brw_upload_pipeline_state(struct brw_context *brw,
                          enum brw_pipeline pipeline)
{
   const struct gen_device_info *devinfo = &brw->screen->devinfo;
   struct gl_context *ctx = &brw->ctx;
   int i;
   static int dirty_count = 0;
   struct brw_state_flags state = brw->state.pipelines[pipeline];
   const unsigned fb_samples =
      MAX2(_mesa_geometric_samples(ctx->DrawBuffer), 1);

   brw_select_pipeline(brw, pipeline);

   if (unlikely(INTEL_DEBUG & DEBUG_REEMIT)) {
      /* Always re-emit all state. */
      brw->NewGLState = ~0;
      ctx->NewDriverState = ~0ull;
   }

   if (pipeline == BRW_RENDER_PIPELINE) {
      if (brw->programs[MESA_SHADER_FRAGMENT] !=
          ctx->FragmentProgram._Current) {
         brw->programs[MESA_SHADER_FRAGMENT] = ctx->FragmentProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_FRAGMENT_PROGRAM;
      }

      if (brw->programs[MESA_SHADER_TESS_EVAL] !=
          ctx->TessEvalProgram._Current) {
         brw->programs[MESA_SHADER_TESS_EVAL] = ctx->TessEvalProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_TESS_PROGRAMS;
      }

      if (brw->programs[MESA_SHADER_TESS_CTRL] !=
          ctx->TessCtrlProgram._Current) {
         brw->programs[MESA_SHADER_TESS_CTRL] = ctx->TessCtrlProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_TESS_PROGRAMS;
      }

      if (brw->programs[MESA_SHADER_GEOMETRY] !=
          ctx->GeometryProgram._Current) {
         brw->programs[MESA_SHADER_GEOMETRY] = ctx->GeometryProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_GEOMETRY_PROGRAM;
      }

      if (brw->programs[MESA_SHADER_VERTEX] != ctx->VertexProgram._Current) {
         brw->programs[MESA_SHADER_VERTEX] = ctx->VertexProgram._Current;
         brw->ctx.NewDriverState |= BRW_NEW_VERTEX_PROGRAM;
      }
   }

   if (brw->programs[MESA_SHADER_COMPUTE] != ctx->ComputeProgram._Current) {
      brw->programs[MESA_SHADER_COMPUTE] = ctx->ComputeProgram._Current;
      brw->ctx.NewDriverState |= BRW_NEW_COMPUTE_PROGRAM;
   }

   if (brw->meta_in_progress != _mesa_meta_in_progress(ctx)) {
      brw->meta_in_progress = _mesa_meta_in_progress(ctx);
      brw->ctx.NewDriverState |= BRW_NEW_META_IN_PROGRESS;
   }

   if (brw->num_samples != fb_samples) {
      brw->num_samples = fb_samples;
      brw->ctx.NewDriverState |= BRW_NEW_NUM_SAMPLES;
   }

   /* Exit early if no state is flagged as dirty */
   merge_ctx_state(brw, &state);
   if ((state.mesa | state.brw) == 0)
      return;

   /* Emit Sandybridge workaround flushes on every primitive, for safety. */
   if (devinfo->gen == 6)
      brw_emit_post_sync_nonzero_flush(brw);

   brw_upload_programs(brw, pipeline);
   merge_ctx_state(brw, &state);

   brw_upload_state_base_address(brw);

   const struct brw_tracked_state *atoms =
      brw_get_pipeline_atoms(brw, pipeline);
   const int num_atoms = brw->num_atoms[pipeline];

   if (unlikely(INTEL_DEBUG)) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = state;

      for (i = 0; i < num_atoms; i++) {
         const struct brw_tracked_state *atom = &atoms[i];
         struct brw_state_flags generated;

         check_and_emit_atom(brw, &state, atom);

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ state)
          * if (examined & generated)
          *    fail;
          */
         xor_states(&generated, &prev, &state);
         assert(!check_state(&examined, &generated));
         prev = state;
      }
   } else {
      for (i = 0; i < num_atoms; i++) {
         const struct brw_tracked_state *atom = &atoms[i];

         check_and_emit_atom(brw, &state, atom);
      }
   }

   if (unlikely(INTEL_DEBUG & DEBUG_STATE)) {
      STATIC_ASSERT(ARRAY_SIZE(brw_bits) == BRW_NUM_STATE_BITS + 1);

      brw_update_dirty_count(mesa_bits, state.mesa);
      brw_update_dirty_count(brw_bits, state.brw);
      if (dirty_count++ % 1000 == 0) {
         brw_print_dirty_count(mesa_bits);
         brw_print_dirty_count(brw_bits);
         fprintf(stderr, "\n");
      }
   }
}

/***********************************************************************
 * Emit all state:
 */
void brw_upload_render_state(struct brw_context *brw)
{
   brw_upload_pipeline_state(brw, BRW_RENDER_PIPELINE);
}

static inline void
brw_pipeline_state_finished(struct brw_context *brw,
                            enum brw_pipeline pipeline)
{
   /* Save all dirty state into the other pipelines */
   for (unsigned i = 0; i < BRW_NUM_PIPELINES; i++) {
      if (i != pipeline) {
         brw->state.pipelines[i].mesa |= brw->NewGLState;
         brw->state.pipelines[i].brw |= brw->ctx.NewDriverState;
      } else {
         memset(&brw->state.pipelines[i], 0, sizeof(struct brw_state_flags));
      }
   }

   brw->NewGLState = 0;
   brw->ctx.NewDriverState = 0ull;
}

/**
 * Clear dirty bits to account for the fact that the state emitted by
 * brw_upload_render_state() has been committed to the hardware. This is a
 * separate call from brw_upload_render_state() because it's possible that
 * after the call to brw_upload_render_state(), we will discover that we've
 * run out of aperture space, and need to rewind the batch buffer to the
 * state it had before the brw_upload_render_state() call.
 */
void
brw_render_state_finished(struct brw_context *brw)
{
   brw_pipeline_state_finished(brw, BRW_RENDER_PIPELINE);
}

void
brw_upload_compute_state(struct brw_context *brw)
{
   brw_upload_pipeline_state(brw, BRW_COMPUTE_PIPELINE);
}

void
brw_compute_state_finished(struct brw_context *brw)
{
   brw_pipeline_state_finished(brw, BRW_COMPUTE_PIPELINE);
}