/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
 /*
  * Authors:
  *   Keith Whitwell <keithw@vmware.com>
  */
#include <assert.h>
#include <inttypes.h>
#include <stdio.h>
#include <string.h>

#include "brw_context.h"
#include "brw_defines.h"
#include "brw_state.h"
#include "brw_program.h"
#include "drivers/common/meta.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "brw_ff_gs.h"
#include "main/framebuffer.h"
49 brw_upload_initial_gpu_state(struct brw_context
*brw
)
51 /* On platforms with hardware contexts, we can set our initial GPU state
52 * right away rather than doing it via state atoms. This saves a small
53 * amount of overhead on every draw call.
59 brw_emit_post_sync_nonzero_flush(brw
);
61 brw_upload_invariant_state(brw
);
64 /* Recommended optimizations for Victim Cache eviction and floating
68 OUT_BATCH(MI_LOAD_REGISTER_IMM
| (3 - 2));
69 OUT_BATCH(GEN7_CACHE_MODE_1
);
70 OUT_BATCH(REG_MASK(GEN9_FLOAT_BLEND_OPTIMIZATION_ENABLE
) |
71 REG_MASK(GEN9_PARTIAL_RESOLVE_DISABLE_IN_VC
) |
72 GEN9_FLOAT_BLEND_OPTIMIZATION_ENABLE
|
73 GEN9_PARTIAL_RESOLVE_DISABLE_IN_VC
);
78 gen8_emit_3dstate_sample_pattern(brw
);
81 OUT_BATCH(_3DSTATE_WM_HZ_OP
<< 16 | (5 - 2));
89 OUT_BATCH(_3DSTATE_WM_CHROMAKEY
<< 16 | (2 - 2));
95 static inline const struct brw_tracked_state
*
96 brw_get_pipeline_atoms(struct brw_context
*brw
,
97 enum brw_pipeline pipeline
)
100 case BRW_RENDER_PIPELINE
:
101 return brw
->render_atoms
;
102 case BRW_COMPUTE_PIPELINE
:
103 return brw
->compute_atoms
;
105 STATIC_ASSERT(BRW_NUM_PIPELINES
== 2);
106 unreachable("Unsupported pipeline");
112 brw_copy_pipeline_atoms(struct brw_context
*brw
,
113 enum brw_pipeline pipeline
,
114 const struct brw_tracked_state
**atoms
,
117 /* This is to work around brw_context::atoms being declared const. We want
118 * it to be const, but it needs to be initialized somehow!
120 struct brw_tracked_state
*context_atoms
=
121 (struct brw_tracked_state
*) brw_get_pipeline_atoms(brw
, pipeline
);
123 for (int i
= 0; i
< num_atoms
; i
++) {
124 context_atoms
[i
] = *atoms
[i
];
125 assert(context_atoms
[i
].dirty
.mesa
| context_atoms
[i
].dirty
.brw
);
126 assert(context_atoms
[i
].emit
);
129 brw
->num_atoms
[pipeline
] = num_atoms
;
132 void brw_init_state( struct brw_context
*brw
)
134 struct gl_context
*ctx
= &brw
->ctx
;
136 /* Force the first brw_select_pipeline to emit pipeline select */
137 brw
->last_pipeline
= BRW_NUM_PIPELINES
;
139 brw_init_caches(brw
);
142 gen10_init_atoms(brw
);
143 else if (brw
->gen
>= 9)
144 gen9_init_atoms(brw
);
145 else if (brw
->gen
>= 8)
146 gen8_init_atoms(brw
);
147 else if (brw
->is_haswell
)
148 gen75_init_atoms(brw
);
149 else if (brw
->gen
>= 7)
150 gen7_init_atoms(brw
);
151 else if (brw
->gen
>= 6)
152 gen6_init_atoms(brw
);
153 else if (brw
->gen
>= 5)
154 gen5_init_atoms(brw
);
155 else if (brw
->is_g4x
)
156 gen45_init_atoms(brw
);
158 gen4_init_atoms(brw
);
160 brw_upload_initial_gpu_state(brw
);
162 brw
->NewGLState
= ~0;
163 brw
->ctx
.NewDriverState
= ~0ull;
165 /* ~0 is a nonsensical value which won't match anything we program, so
166 * the programming will take effect on the first time around.
168 brw
->pma_stall_bits
= ~0;
170 /* Make sure that brw->ctx.NewDriverState has enough bits to hold all possible
173 STATIC_ASSERT(BRW_NUM_STATE_BITS
<= 8 * sizeof(brw
->ctx
.NewDriverState
));
175 ctx
->DriverFlags
.NewTransformFeedback
= BRW_NEW_TRANSFORM_FEEDBACK
;
176 ctx
->DriverFlags
.NewTransformFeedbackProg
= BRW_NEW_TRANSFORM_FEEDBACK
;
177 ctx
->DriverFlags
.NewRasterizerDiscard
= BRW_NEW_RASTERIZER_DISCARD
;
178 ctx
->DriverFlags
.NewUniformBuffer
= BRW_NEW_UNIFORM_BUFFER
;
179 ctx
->DriverFlags
.NewShaderStorageBuffer
= BRW_NEW_UNIFORM_BUFFER
;
180 ctx
->DriverFlags
.NewTextureBuffer
= BRW_NEW_TEXTURE_BUFFER
;
181 ctx
->DriverFlags
.NewAtomicBuffer
= BRW_NEW_ATOMIC_BUFFER
;
182 ctx
->DriverFlags
.NewImageUnits
= BRW_NEW_IMAGE_UNITS
;
183 ctx
->DriverFlags
.NewDefaultTessLevels
= BRW_NEW_DEFAULT_TESS_LEVELS
;
184 ctx
->DriverFlags
.NewIntelConservativeRasterization
= BRW_NEW_CONSERVATIVE_RASTERIZATION
;
/**
 * Tear down the state caches created by brw_init_state().
 */
void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
}
/***********************************************************************
 */
197 check_state(const struct brw_state_flags
*a
, const struct brw_state_flags
*b
)
199 return ((a
->mesa
& b
->mesa
) | (a
->brw
& b
->brw
)) != 0;
202 static void accumulate_state( struct brw_state_flags
*a
,
203 const struct brw_state_flags
*b
)
210 static void xor_states( struct brw_state_flags
*result
,
211 const struct brw_state_flags
*a
,
212 const struct brw_state_flags
*b
)
214 result
->mesa
= a
->mesa
^ b
->mesa
;
215 result
->brw
= a
->brw
^ b
->brw
;
/* One entry of the dirty-bit statistics tables below (debug aid).
 * NOTE(review): field declarations were missing from the garbled source;
 * types reconstructed from the printf format in brw_print_dirty_count().
 */
struct dirty_bit_map {
   uint64_t bit;     /* a single dirty flag, e.g. BRW_NEW_SURFACES */
   char *name;       /* stringified flag name for debug output */
   uint32_t count;   /* times the flag was observed dirty */
};

/* Build a table entry from a flag, stringifying its name. */
#define DEFINE_BIT(name) {name, #name, 0}
226 static struct dirty_bit_map mesa_bits
[] = {
227 DEFINE_BIT(_NEW_MODELVIEW
),
228 DEFINE_BIT(_NEW_PROJECTION
),
229 DEFINE_BIT(_NEW_TEXTURE_MATRIX
),
230 DEFINE_BIT(_NEW_COLOR
),
231 DEFINE_BIT(_NEW_DEPTH
),
232 DEFINE_BIT(_NEW_EVAL
),
233 DEFINE_BIT(_NEW_FOG
),
234 DEFINE_BIT(_NEW_HINT
),
235 DEFINE_BIT(_NEW_LIGHT
),
236 DEFINE_BIT(_NEW_LINE
),
237 DEFINE_BIT(_NEW_PIXEL
),
238 DEFINE_BIT(_NEW_POINT
),
239 DEFINE_BIT(_NEW_POLYGON
),
240 DEFINE_BIT(_NEW_POLYGONSTIPPLE
),
241 DEFINE_BIT(_NEW_SCISSOR
),
242 DEFINE_BIT(_NEW_STENCIL
),
243 DEFINE_BIT(_NEW_TEXTURE_OBJECT
),
244 DEFINE_BIT(_NEW_TRANSFORM
),
245 DEFINE_BIT(_NEW_VIEWPORT
),
246 DEFINE_BIT(_NEW_TEXTURE_STATE
),
247 DEFINE_BIT(_NEW_ARRAY
),
248 DEFINE_BIT(_NEW_RENDERMODE
),
249 DEFINE_BIT(_NEW_BUFFERS
),
250 DEFINE_BIT(_NEW_CURRENT_ATTRIB
),
251 DEFINE_BIT(_NEW_MULTISAMPLE
),
252 DEFINE_BIT(_NEW_TRACK_MATRIX
),
253 DEFINE_BIT(_NEW_PROGRAM
),
254 DEFINE_BIT(_NEW_PROGRAM_CONSTANTS
),
255 DEFINE_BIT(_NEW_FRAG_CLAMP
),
256 /* Avoid sign extension problems. */
257 {(unsigned) _NEW_VARYING_VP_INPUTS
, "_NEW_VARYING_VP_INPUTS", 0},
261 static struct dirty_bit_map brw_bits
[] = {
262 DEFINE_BIT(BRW_NEW_FS_PROG_DATA
),
263 DEFINE_BIT(BRW_NEW_BLORP_BLIT_PROG_DATA
),
264 DEFINE_BIT(BRW_NEW_SF_PROG_DATA
),
265 DEFINE_BIT(BRW_NEW_VS_PROG_DATA
),
266 DEFINE_BIT(BRW_NEW_FF_GS_PROG_DATA
),
267 DEFINE_BIT(BRW_NEW_GS_PROG_DATA
),
268 DEFINE_BIT(BRW_NEW_TCS_PROG_DATA
),
269 DEFINE_BIT(BRW_NEW_TES_PROG_DATA
),
270 DEFINE_BIT(BRW_NEW_CLIP_PROG_DATA
),
271 DEFINE_BIT(BRW_NEW_CS_PROG_DATA
),
272 DEFINE_BIT(BRW_NEW_URB_FENCE
),
273 DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM
),
274 DEFINE_BIT(BRW_NEW_GEOMETRY_PROGRAM
),
275 DEFINE_BIT(BRW_NEW_TESS_PROGRAMS
),
276 DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM
),
277 DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE
),
278 DEFINE_BIT(BRW_NEW_PATCH_PRIMITIVE
),
279 DEFINE_BIT(BRW_NEW_PRIMITIVE
),
280 DEFINE_BIT(BRW_NEW_CONTEXT
),
281 DEFINE_BIT(BRW_NEW_PSP
),
282 DEFINE_BIT(BRW_NEW_SURFACES
),
283 DEFINE_BIT(BRW_NEW_BINDING_TABLE_POINTERS
),
284 DEFINE_BIT(BRW_NEW_INDICES
),
285 DEFINE_BIT(BRW_NEW_VERTICES
),
286 DEFINE_BIT(BRW_NEW_DEFAULT_TESS_LEVELS
),
287 DEFINE_BIT(BRW_NEW_BATCH
),
288 DEFINE_BIT(BRW_NEW_INDEX_BUFFER
),
289 DEFINE_BIT(BRW_NEW_VS_CONSTBUF
),
290 DEFINE_BIT(BRW_NEW_TCS_CONSTBUF
),
291 DEFINE_BIT(BRW_NEW_TES_CONSTBUF
),
292 DEFINE_BIT(BRW_NEW_GS_CONSTBUF
),
293 DEFINE_BIT(BRW_NEW_PROGRAM_CACHE
),
294 DEFINE_BIT(BRW_NEW_STATE_BASE_ADDRESS
),
295 DEFINE_BIT(BRW_NEW_VUE_MAP_GEOM_OUT
),
296 DEFINE_BIT(BRW_NEW_TRANSFORM_FEEDBACK
),
297 DEFINE_BIT(BRW_NEW_RASTERIZER_DISCARD
),
298 DEFINE_BIT(BRW_NEW_STATS_WM
),
299 DEFINE_BIT(BRW_NEW_UNIFORM_BUFFER
),
300 DEFINE_BIT(BRW_NEW_ATOMIC_BUFFER
),
301 DEFINE_BIT(BRW_NEW_IMAGE_UNITS
),
302 DEFINE_BIT(BRW_NEW_META_IN_PROGRESS
),
303 DEFINE_BIT(BRW_NEW_PUSH_CONSTANT_ALLOCATION
),
304 DEFINE_BIT(BRW_NEW_NUM_SAMPLES
),
305 DEFINE_BIT(BRW_NEW_TEXTURE_BUFFER
),
306 DEFINE_BIT(BRW_NEW_GEN4_UNIT_STATE
),
307 DEFINE_BIT(BRW_NEW_CC_VP
),
308 DEFINE_BIT(BRW_NEW_SF_VP
),
309 DEFINE_BIT(BRW_NEW_CLIP_VP
),
310 DEFINE_BIT(BRW_NEW_SAMPLER_STATE_TABLE
),
311 DEFINE_BIT(BRW_NEW_VS_ATTRIB_WORKAROUNDS
),
312 DEFINE_BIT(BRW_NEW_COMPUTE_PROGRAM
),
313 DEFINE_BIT(BRW_NEW_CS_WORK_GROUPS
),
314 DEFINE_BIT(BRW_NEW_URB_SIZE
),
315 DEFINE_BIT(BRW_NEW_CC_STATE
),
316 DEFINE_BIT(BRW_NEW_BLORP
),
317 DEFINE_BIT(BRW_NEW_VIEWPORT_COUNT
),
318 DEFINE_BIT(BRW_NEW_CONSERVATIVE_RASTERIZATION
),
323 brw_update_dirty_count(struct dirty_bit_map
*bit_map
, uint64_t bits
)
325 for (int i
= 0; bit_map
[i
].bit
!= 0; i
++) {
326 if (bit_map
[i
].bit
& bits
)
332 brw_print_dirty_count(struct dirty_bit_map
*bit_map
)
334 for (int i
= 0; bit_map
[i
].bit
!= 0; i
++) {
335 if (bit_map
[i
].count
> 1) {
336 fprintf(stderr
, "0x%016"PRIx64
": %12d (%s)\n",
337 bit_map
[i
].bit
, bit_map
[i
].count
, bit_map
[i
].name
);
343 brw_upload_tess_programs(struct brw_context
*brw
)
345 if (brw
->tess_eval_program
) {
346 brw_upload_tcs_prog(brw
);
347 brw_upload_tes_prog(brw
);
349 brw
->tcs
.base
.prog_data
= NULL
;
350 brw
->tes
.base
.prog_data
= NULL
;
355 brw_upload_programs(struct brw_context
*brw
,
356 enum brw_pipeline pipeline
)
358 struct gl_context
*ctx
= &brw
->ctx
;
360 if (pipeline
== BRW_RENDER_PIPELINE
) {
361 brw_upload_vs_prog(brw
);
362 brw_upload_tess_programs(brw
);
365 brw_upload_ff_gs_prog(brw
);
367 brw_upload_gs_prog(brw
);
369 /* Update the VUE map for data exiting the GS stage of the pipeline.
370 * This comes from the last enabled shader stage.
372 GLbitfield64 old_slots
= brw
->vue_map_geom_out
.slots_valid
;
373 bool old_separate
= brw
->vue_map_geom_out
.separate
;
374 struct brw_vue_prog_data
*vue_prog_data
;
375 if (brw
->geometry_program
)
376 vue_prog_data
= brw_vue_prog_data(brw
->gs
.base
.prog_data
);
377 else if (brw
->tess_eval_program
)
378 vue_prog_data
= brw_vue_prog_data(brw
->tes
.base
.prog_data
);
380 vue_prog_data
= brw_vue_prog_data(brw
->vs
.base
.prog_data
);
382 brw
->vue_map_geom_out
= vue_prog_data
->vue_map
;
384 /* If the layout has changed, signal BRW_NEW_VUE_MAP_GEOM_OUT. */
385 if (old_slots
!= brw
->vue_map_geom_out
.slots_valid
||
386 old_separate
!= brw
->vue_map_geom_out
.separate
)
387 brw
->ctx
.NewDriverState
|= BRW_NEW_VUE_MAP_GEOM_OUT
;
389 if ((old_slots
^ brw
->vue_map_geom_out
.slots_valid
) &
390 VARYING_BIT_VIEWPORT
) {
391 ctx
->NewDriverState
|= BRW_NEW_VIEWPORT_COUNT
;
392 brw
->clip
.viewport_count
=
393 (brw
->vue_map_geom_out
.slots_valid
& VARYING_BIT_VIEWPORT
) ?
394 ctx
->Const
.MaxViewports
: 1;
397 brw_upload_wm_prog(brw
);
400 brw_upload_clip_prog(brw
);
401 brw_upload_sf_prog(brw
);
403 } else if (pipeline
== BRW_COMPUTE_PIPELINE
) {
404 brw_upload_cs_prog(brw
);
409 merge_ctx_state(struct brw_context
*brw
,
410 struct brw_state_flags
*state
)
412 state
->mesa
|= brw
->NewGLState
;
413 state
->brw
|= brw
->ctx
.NewDriverState
;
417 check_and_emit_atom(struct brw_context
*brw
,
418 struct brw_state_flags
*state
,
419 const struct brw_tracked_state
*atom
)
421 if (check_state(state
, &atom
->dirty
)) {
423 merge_ctx_state(brw
, state
);
428 brw_upload_pipeline_state(struct brw_context
*brw
,
429 enum brw_pipeline pipeline
)
431 struct gl_context
*ctx
= &brw
->ctx
;
433 static int dirty_count
= 0;
434 struct brw_state_flags state
= brw
->state
.pipelines
[pipeline
];
435 unsigned int fb_samples
= _mesa_geometric_samples(ctx
->DrawBuffer
);
437 brw_select_pipeline(brw
, pipeline
);
440 /* Always re-emit all state. */
441 brw
->NewGLState
= ~0;
442 ctx
->NewDriverState
= ~0ull;
445 if (pipeline
== BRW_RENDER_PIPELINE
) {
446 if (brw
->fragment_program
!= ctx
->FragmentProgram
._Current
) {
447 brw
->fragment_program
= ctx
->FragmentProgram
._Current
;
448 brw
->ctx
.NewDriverState
|= BRW_NEW_FRAGMENT_PROGRAM
;
451 if (brw
->tess_eval_program
!= ctx
->TessEvalProgram
._Current
) {
452 brw
->tess_eval_program
= ctx
->TessEvalProgram
._Current
;
453 brw
->ctx
.NewDriverState
|= BRW_NEW_TESS_PROGRAMS
;
456 if (brw
->tess_ctrl_program
!= ctx
->TessCtrlProgram
._Current
) {
457 brw
->tess_ctrl_program
= ctx
->TessCtrlProgram
._Current
;
458 brw
->ctx
.NewDriverState
|= BRW_NEW_TESS_PROGRAMS
;
461 if (brw
->geometry_program
!= ctx
->GeometryProgram
._Current
) {
462 brw
->geometry_program
= ctx
->GeometryProgram
._Current
;
463 brw
->ctx
.NewDriverState
|= BRW_NEW_GEOMETRY_PROGRAM
;
466 if (brw
->vertex_program
!= ctx
->VertexProgram
._Current
) {
467 brw
->vertex_program
= ctx
->VertexProgram
._Current
;
468 brw
->ctx
.NewDriverState
|= BRW_NEW_VERTEX_PROGRAM
;
472 if (brw
->compute_program
!= ctx
->ComputeProgram
._Current
) {
473 brw
->compute_program
= ctx
->ComputeProgram
._Current
;
474 brw
->ctx
.NewDriverState
|= BRW_NEW_COMPUTE_PROGRAM
;
477 if (brw
->meta_in_progress
!= _mesa_meta_in_progress(ctx
)) {
478 brw
->meta_in_progress
= _mesa_meta_in_progress(ctx
);
479 brw
->ctx
.NewDriverState
|= BRW_NEW_META_IN_PROGRESS
;
482 if (brw
->num_samples
!= fb_samples
) {
483 brw
->num_samples
= fb_samples
;
484 brw
->ctx
.NewDriverState
|= BRW_NEW_NUM_SAMPLES
;
487 /* Exit early if no state is flagged as dirty */
488 merge_ctx_state(brw
, &state
);
489 if ((state
.mesa
| state
.brw
) == 0)
492 /* Emit Sandybridge workaround flushes on every primitive, for safety. */
494 brw_emit_post_sync_nonzero_flush(brw
);
496 brw_upload_programs(brw
, pipeline
);
497 merge_ctx_state(brw
, &state
);
499 brw_upload_state_base_address(brw
);
501 const struct brw_tracked_state
*atoms
=
502 brw_get_pipeline_atoms(brw
, pipeline
);
503 const int num_atoms
= brw
->num_atoms
[pipeline
];
505 if (unlikely(INTEL_DEBUG
)) {
506 /* Debug version which enforces various sanity checks on the
507 * state flags which are generated and checked to help ensure
508 * state atoms are ordered correctly in the list.
510 struct brw_state_flags examined
, prev
;
511 memset(&examined
, 0, sizeof(examined
));
514 for (i
= 0; i
< num_atoms
; i
++) {
515 const struct brw_tracked_state
*atom
= &atoms
[i
];
516 struct brw_state_flags generated
;
518 check_and_emit_atom(brw
, &state
, atom
);
520 accumulate_state(&examined
, &atom
->dirty
);
522 /* generated = (prev ^ state)
523 * if (examined & generated)
526 xor_states(&generated
, &prev
, &state
);
527 assert(!check_state(&examined
, &generated
));
532 for (i
= 0; i
< num_atoms
; i
++) {
533 const struct brw_tracked_state
*atom
= &atoms
[i
];
535 check_and_emit_atom(brw
, &state
, atom
);
539 if (unlikely(INTEL_DEBUG
& DEBUG_STATE
)) {
540 STATIC_ASSERT(ARRAY_SIZE(brw_bits
) == BRW_NUM_STATE_BITS
+ 1);
542 brw_update_dirty_count(mesa_bits
, state
.mesa
);
543 brw_update_dirty_count(brw_bits
, state
.brw
);
544 if (dirty_count
++ % 1000 == 0) {
545 brw_print_dirty_count(mesa_bits
);
546 brw_print_dirty_count(brw_bits
);
547 fprintf(stderr
, "\n");
/***********************************************************************
 */
555 void brw_upload_render_state(struct brw_context
*brw
)
557 brw_upload_pipeline_state(brw
, BRW_RENDER_PIPELINE
);
561 brw_pipeline_state_finished(struct brw_context
*brw
,
562 enum brw_pipeline pipeline
)
564 /* Save all dirty state into the other pipelines */
565 for (unsigned i
= 0; i
< BRW_NUM_PIPELINES
; i
++) {
567 brw
->state
.pipelines
[i
].mesa
|= brw
->NewGLState
;
568 brw
->state
.pipelines
[i
].brw
|= brw
->ctx
.NewDriverState
;
570 memset(&brw
->state
.pipelines
[i
], 0, sizeof(struct brw_state_flags
));
575 brw
->ctx
.NewDriverState
= 0ull;
579 * Clear dirty bits to account for the fact that the state emitted by
580 * brw_upload_render_state() has been committed to the hardware. This is a
581 * separate call from brw_upload_render_state() because it's possible that
582 * after the call to brw_upload_render_state(), we will discover that we've
583 * run out of aperture space, and need to rewind the batch buffer to the state
584 * it had before the brw_upload_render_state() call.
587 brw_render_state_finished(struct brw_context
*brw
)
589 brw_pipeline_state_finished(brw
, BRW_RENDER_PIPELINE
);
593 brw_upload_compute_state(struct brw_context
*brw
)
595 brw_upload_pipeline_state(brw
, BRW_COMPUTE_PIPELINE
);
599 brw_compute_state_finished(struct brw_context
*brw
)
601 brw_pipeline_state_finished(brw
, BRW_COMPUTE_PIPELINE
);