/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/* Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */
#include "brw_context.h"
#include "brw_state.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
/* This is used to initialize brw->state.atoms[].  We could use this
 * list directly except for a single atom, brw_constant_buffer, which
 * has a .dirty value which changes according to the parameters of the
 * current fragment and vertex programs, and so cannot be a static
 * value.
 */
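/* For reference, each atom below is a descriptor pairing the dirty flags it
 * watches with optional per-draw hooks.  A minimal sketch of the shape this
 * file relies on (the authoritative definitions live in brw_context.h /
 * brw_state.h):
 *
 *    struct brw_state_flags {
 *       GLuint mesa;    (_NEW_* flags from core Mesa)
 *       GLuint brw;     (BRW_NEW_* driver-internal flags)
 *       GLuint cache;   (CACHE_NEW_* program/unit cache flags)
 *    };
 *
 *    struct brw_tracked_state {
 *       struct brw_state_flags dirty;            (when to run the atom)
 *       void (*prepare)(struct brw_context *);   (compute state, validate BOs)
 *       void (*emit)(struct brw_context *);      (write commands to the batch)
 *    };
 */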
static const struct brw_tracked_state *gen4_atoms[] =
{
   &brw_vs_prog, /* must do before GS prog, state base address. */
   &brw_gs_prog, /* must do before state base address */
   &brw_clip_prog, /* must do before state base address */
   &brw_sf_prog, /* must do before state base address */
   &brw_wm_prog, /* must do before state base address */
   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_recalculate_urb_fence,
   &brw_vs_constants, /* Before vs_surfaces and constant_buffer */
   &brw_wm_constants, /* Before wm_surfaces and constant_buffer */

   &brw_vs_surfaces, /* must do before unit */
   &brw_wm_constant_surface, /* must do before wm surfaces/bind bo */
   &brw_wm_surfaces, /* must do before samplers and unit */
   &brw_wm_binding_table,
   &brw_vs_unit, /* always required, enabled or not */
   &brw_state_base_address,
   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_polygon_stipple_offset,

   &brw_aa_line_parameters,
static const struct brw_tracked_state *gen6_atoms[] =
{
   &brw_vs_prog, /* must do before state base address */
   &brw_gs_prog, /* must do before state base address */
   &brw_wm_prog, /* must do before state base address */
   /* Command packets: */
   &brw_invarient_state,

   /* must do before binding table pointers, cc state ptrs */
   &brw_state_base_address,
   &gen6_viewport_state, /* must do after *_vp stages */

   &gen6_blend_state, /* must do before cc unit */
   &gen6_color_calc_state, /* must do before cc unit */
   &gen6_depth_stencil_state, /* must do before cc unit */
   &gen6_cc_state_pointers,
   &brw_vs_constants, /* Before vs_surfaces and constant_buffer */
   &brw_wm_constants, /* Before wm_surfaces and constant_buffer */
   &gen6_vs_constants, /* Before vs_state */
   &gen6_wm_constants, /* Before wm_state */

   &brw_vs_surfaces, /* must do before unit */
   &brw_wm_constant_surface, /* must do before wm surfaces/bind bo */
   &brw_wm_surfaces, /* must do before samplers and unit */
   &brw_wm_binding_table,
   &gen6_binding_table_pointers,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_aa_line_parameters,
const struct brw_tracked_state *gen7_atoms[] =
{
   /* Command packets: */
   &brw_invarient_state,

   /* must do before binding table pointers, cc state ptrs */
   &brw_state_base_address,
   &gen7_cc_viewport_state_pointer, /* must do after brw_cc_vp */
   &gen7_sf_clip_viewport,

   &gen6_blend_state, /* must do before cc unit */
   &gen6_color_calc_state, /* must do before cc unit */
   &gen6_depth_stencil_state, /* must do before cc unit */
   &gen7_blend_state_pointer,
   &gen7_cc_state_pointer,
   &gen7_depth_stencil_state_pointer,
   &brw_vs_constants, /* Before vs_surfaces and constant_buffer */
   &brw_wm_constants, /* Before wm_surfaces and constant_buffer */
   &gen6_vs_constants, /* Before vs_state */
   &gen7_wm_constants, /* Before wm_surfaces and constant_buffer */

   &brw_vs_surfaces, /* must do before unit */
   &gen7_wm_constant_surface, /* must do before wm surfaces/bind bo */
   &gen7_wm_surfaces, /* must do before samplers and unit */
   &brw_wm_binding_table,
   &gen7_disable_stages,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_aa_line_parameters,
void brw_init_state( struct brw_context *brw )
{
   const struct brw_tracked_state **atoms;
   int num_atoms;

   brw_init_caches(brw);
   if (brw->intel.gen >= 7) {
      atoms = gen7_atoms;
      num_atoms = ARRAY_SIZE(gen7_atoms);
   } else if (brw->intel.gen == 6) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }
   while (num_atoms--) {
      assert((*atoms)->dirty.mesa |
             (*atoms)->dirty.brw |
             (*atoms)->dirty.cache);

      if ((*atoms)->prepare)
         brw->prepare_atoms[brw->num_prepare_atoms++] = **atoms;
      if ((*atoms)->emit)
         brw->emit_atoms[brw->num_emit_atoms++] = **atoms;
      atoms++;
   }

   assert(brw->num_emit_atoms <= ARRAY_SIZE(brw->emit_atoms));
   assert(brw->num_prepare_atoms <= ARRAY_SIZE(brw->prepare_atoms));
}
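/* Splitting the list into prepare_atoms[] and emit_atoms[] happens once at
 * context creation, so the per-draw loops in brw_validate_state() and
 * brw_upload_state() below only walk the hooks that actually exist.
 */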
void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
}
/***********************************************************************
 */
static GLuint
check_state( const struct brw_state_flags *a,
             const struct brw_state_flags *b )
{
   return ((a->mesa & b->mesa) |
           (a->brw & b->brw) |
           (a->cache & b->cache)) != 0;
}
static void accumulate_state( struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   a->mesa |= b->mesa;
   a->brw |= b->brw;
   a->cache |= b->cache;
}
static void xor_states( struct brw_state_flags *result,
                        const struct brw_state_flags *a,
                        const struct brw_state_flags *b )
{
   result->mesa = a->mesa ^ b->mesa;
   result->brw = a->brw ^ b->brw;
   result->cache = a->cache ^ b->cache;
}
static void
brw_clear_validated_bos(struct brw_context *brw)
{
   int i;

   /* Clear the last round of validated bos */
   for (i = 0; i < brw->state.validated_bo_count; i++) {
      drm_intel_bo_unreference(brw->state.validated_bos[i]);
      brw->state.validated_bos[i] = NULL;
   }
   brw->state.validated_bo_count = 0;
}
struct dirty_bit_map {
   uint32_t bit;
   char *name;
   uint32_t count;
};
#define DEFINE_BIT(name) {name, #name, 0}
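/* For example, DEFINE_BIT(_NEW_LIGHT) expands to {_NEW_LIGHT, "_NEW_LIGHT", 0}:
 * the bit value, its printable name (via the # stringizing operator), and a
 * zeroed hit counter used by the DEBUG_STATE statistics below.
 */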
static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_PACKUNPACK),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   {0, 0, 0}
};
static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_WM_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_PROGRAM_CACHE),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_WM_SURFACES),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_NR_WM_SURFACES),
   DEFINE_BIT(BRW_NEW_NR_VS_SURFACES),
   DEFINE_BIT(BRW_NEW_VS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_WM_CONSTBUF),
   DEFINE_BIT(BRW_NEW_VS_BINDING_TABLE),
   DEFINE_BIT(BRW_NEW_GS_BINDING_TABLE),
   DEFINE_BIT(BRW_NEW_PS_BINDING_TABLE),
   DEFINE_BIT(BRW_NEW_STATE_BASE_ADDRESS),
   {0, 0, 0}
};
static struct dirty_bit_map cache_bits[] = {
   DEFINE_BIT(CACHE_NEW_BLEND_STATE),
   DEFINE_BIT(CACHE_NEW_CC_VP),
   DEFINE_BIT(CACHE_NEW_CC_UNIT),
   DEFINE_BIT(CACHE_NEW_WM_PROG),
   DEFINE_BIT(CACHE_NEW_SAMPLER),
   DEFINE_BIT(CACHE_NEW_WM_UNIT),
   DEFINE_BIT(CACHE_NEW_SF_PROG),
   DEFINE_BIT(CACHE_NEW_SF_VP),
   DEFINE_BIT(CACHE_NEW_SF_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_PROG),
   DEFINE_BIT(CACHE_NEW_GS_UNIT),
   DEFINE_BIT(CACHE_NEW_GS_PROG),
   DEFINE_BIT(CACHE_NEW_CLIP_VP),
   DEFINE_BIT(CACHE_NEW_CLIP_UNIT),
   DEFINE_BIT(CACHE_NEW_CLIP_PROG),
   {0, 0, 0}
};
static void
brw_update_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
{
   int i;

   for (i = 0; i < 32; i++) {
      if (bit_map[i].bit == 0)
         return;

      if (bit_map[i].bit & bits)
         bit_map[i].count++;
   }
}
static void
brw_print_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
{
   int i;

   for (i = 0; i < 32; i++) {
      if (bit_map[i].bit == 0)
         return;

      fprintf(stderr, "0x%08x: %12d (%s)\n",
              bit_map[i].bit, bit_map[i].count, bit_map[i].name);
   }
}
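/* With INTEL_DEBUG=state, brw_upload_state() below feeds these helpers and
 * dumps one line per tracked bit roughly every 1000 uploads, formatted as
 * "0x%08x: %12d (%s)", e.g. (illustrative values only):
 *
 *    0x00000200:         4096 (_NEW_LIGHT)
 */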
/***********************************************************************
 */
void brw_validate_state( struct brw_context *brw )
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   const struct brw_tracked_state *atoms = brw->prepare_atoms;
   int num_atoms = brw->num_prepare_atoms;
   GLuint i;
   brw_clear_validated_bos(brw);

   state->mesa |= brw->intel.NewGLState;
   brw->intel.NewGLState = 0;
   brw_add_validated_bo(brw, intel->batch.bo);

   if (brw->emit_state_always) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }
   if (brw->fragment_program != ctx->FragmentProgram._Current) {
      brw->fragment_program = ctx->FragmentProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
   }
   if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
   }
   if ((state->mesa | state->cache | state->brw) == 0)
      return;
   brw->intel.Fallback = GL_FALSE; /* boolean, not bitfield */
   /* do prepare stage for all atoms */
   for (i = 0; i < num_atoms; i++) {
      const struct brw_tracked_state *atom = &atoms[i];

      if (check_state(state, &atom->dirty)) {
         atom->prepare(brw);

         if (brw->intel.Fallback)
            break;
      }
   }
   intel_check_front_buffer_rendering(intel);
   /* Make sure that the textures which are referenced by the current
    * brw fragment program are actually present/valid.
    * If this fails, we can experience GPU lock-ups.
    */
   {
      const struct brw_fragment_program *fp;
      fp = brw_fragment_program_const(brw->fragment_program);
      if (fp) {
         assert((fp->tex_units_used & ctx->Texture._EnabledUnits)
                == fp->tex_units_used);
      }
   }
}
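/* A note on the flow (summarizing this file, not new behaviour): the draw
 * path is expected to call brw_validate_state() first, which runs the
 * prepare() hooks and registers the buffer objects the atoms will reference,
 * and then brw_upload_state(), which runs the emit() hooks for every atom
 * whose dirty bits intersect brw->state.dirty and finally clears those bits.
 */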
void brw_upload_state(struct brw_context *brw)
{
   struct brw_state_flags *state = &brw->state.dirty;
   const struct brw_tracked_state *atoms = brw->emit_atoms;
   int num_atoms = brw->num_emit_atoms;
   int i;
   static int dirty_count = 0;
   brw_clear_validated_bos(brw);
   if (unlikely(INTEL_DEBUG)) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;
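      /* The loop below runs the emit atoms while cross-checking their
       * ordering: "examined" accumulates every dirty flag consumed so far,
       * and any flag that only becomes set after it has already been
       * examined means a producer atom is listed after one of its consumers.
       * For example, if atom B depends on a flag that only atom A raises and
       * B precedes A in the list, the assert below fires when A runs.
       */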
      for (i = 0; i < num_atoms; i++) {
         const struct brw_tracked_state *atom = &atoms[i];
         struct brw_state_flags generated;
         if (brw->intel.Fallback)
            break;

         if (check_state(state, &atom->dirty)) {
            atom->emit(brw);
         }

         accumulate_state(&examined, &atom->dirty);
         /* generated = (prev ^ state)
          * if (examined & generated)
          *     fail;
          */
         xor_states(&generated, &prev, state);
         assert(!check_state(&examined, &generated));
         prev = *state;
      }
   }
   else {
      for (i = 0; i < num_atoms; i++) {
         const struct brw_tracked_state *atom = &atoms[i];

         if (brw->intel.Fallback)
            break;

         if (check_state(state, &atom->dirty)) {
            atom->emit(brw);
         }
      }
   }
   if (unlikely(INTEL_DEBUG & DEBUG_STATE)) {
      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      brw_update_dirty_count(cache_bits, state->cache);
      if (dirty_count++ % 1000 == 0) {
         brw_print_dirty_count(mesa_bits, state->mesa);
         brw_print_dirty_count(brw_bits, state->brw);
         brw_print_dirty_count(cache_bits, state->cache);
         fprintf(stderr, "\n");
      }
   }
   if (!brw->intel.Fallback)
      memset(state, 0, sizeof(*state));
}