2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **********************************************************************/
29 * Keith Whitwell <keith@tungstengraphics.com>
34 #include "brw_context.h"
35 #include "brw_state.h"
37 #include "intel_batchbuffer.h"
39 /* This is used to initialize brw->state.atoms[]. We could use this
40 * list directly except for a single atom, brw_constant_buffer, which
41 * has a .dirty value which changes according to the parameters of the
42 * current fragment and vertex programs, and so cannot be a static
/* NOTE(review): the NULL entry at the end of this table is the
 * placeholder for that dynamic atom; brw_init_state() patches it to
 * point at brw->curbe.tracked_state.
 * NOTE(review): this extraction appears to have dropped lines — the
 * array's opening '{', closing '};' and several atom entries between
 * the embedded original line numbers (46-57, 60-62, 64-68, ...) are
 * missing.  Restore from upstream before compiling.
 */
45 const struct brw_tracked_state
*atoms
[] =
58 /* Once all the programs are done, we know how large urb entry
59 * sizes need to be and can decide if we need to change the urb
63 &brw_recalculate_urb_fence
,
69 &brw_wm_surfaces
, /* must do before samplers */
75 &brw_vs_unit
, /* always required, enabled or not */
82 &brw_state_base_address
,
85 &brw_binding_table_pointers
,
86 &brw_blend_constant_color
,
92 &brw_polygon_stipple_offset
,
96 /* Ordering of the commands below is documented as fixed.
99 &brw_pipelined_state_pointers
,
101 &brw_constant_buffer_state
,
107 NULL
, /* brw_constant_buffer */
111 void brw_init_state( struct brw_context
*brw
)
116 brw_init_caches(brw
);
118 brw
->state
.atoms
= _mesa_malloc(sizeof(atoms
));
119 brw
->state
.nr_atoms
= sizeof(atoms
)/sizeof(*atoms
);
120 _mesa_memcpy(brw
->state
.atoms
, atoms
, sizeof(atoms
));
122 /* Patch in a pointer to the dynamic state atom:
124 for (i
= 0; i
< brw
->state
.nr_atoms
; i
++)
125 if (brw
->state
.atoms
[i
] == NULL
)
126 brw
->state
.atoms
[i
] = &brw
->curbe
.tracked_state
;
128 _mesa_memcpy(&brw
->curbe
.tracked_state
,
129 &brw_constant_buffer
,
130 sizeof(brw_constant_buffer
));
134 void brw_destroy_state( struct brw_context
*brw
)
136 if (brw
->state
.atoms
) {
137 _mesa_free(brw
->state
.atoms
);
138 brw
->state
.atoms
= NULL
;
141 brw_destroy_caches(brw
);
142 brw_destroy_batch_cache(brw
);
143 brw_destroy_pools(brw
);
146 /***********************************************************************
149 static GLboolean
check_state( const struct brw_state_flags
*a
,
150 const struct brw_state_flags
*b
)
152 return ((a
->mesa
& b
->mesa
) ||
154 (a
->cache
& b
->cache
));
157 static void accumulate_state( struct brw_state_flags
*a
,
158 const struct brw_state_flags
*b
)
162 a
->cache
|= b
->cache
;
166 static void xor_states( struct brw_state_flags
*result
,
167 const struct brw_state_flags
*a
,
168 const struct brw_state_flags
*b
)
170 result
->mesa
= a
->mesa
^ b
->mesa
;
171 result
->brw
= a
->brw
^ b
->brw
;
172 result
->cache
= a
->cache
^ b
->cache
;
176 /***********************************************************************
/* brw_validate_state: fold the dirty bits accumulated by core Mesa
 * and the driver into brw->state.dirty, run the update() hook of
 * every tracked-state atom whose dirty mask intersects them, then
 * clear the dirty set.
 *
 * NOTE(review): this extraction has dropped many lines (the embedded
 * original line numbers jump, e.g. 205 -> 210): the early-out test
 * "if (state->mesa == 0 && ..." is cut mid-expression, the loop
 * counter 'i' is never declared, 'prev' is never initialized, and the
 * if/else wrapping the debug vs. non-debug loops is not visible.
 * Restore from upstream before trusting this body.
 */
179 void brw_validate_state( struct brw_context
*brw
)
181 struct brw_state_flags
*state
= &brw
->state
.dirty
;
/* Fold the core-Mesa dirty bits gathered by the intel layer into our
 * own set, then clear the shared accumulator.
 */
184 state
->mesa
|= brw
->intel
.NewGLState
;
185 brw
->intel
.NewGLState
= 0;
/* NOTE(review): a guarding condition for the BRW_NEW_CONTEXT line
 * below (original line 187) appears to have been dropped — confirm
 * against upstream before assuming it is unconditional.
 */
188 state
->brw
|= BRW_NEW_CONTEXT
;
190 if (brw
->emit_state_always
) {
195 /* texenv program needs to notify us somehow when this happens:
196 * Some confusion about which state flag should represent this change.
198 if (brw
->fragment_program
!= brw
->attribs
.FragmentProgram
->_Current
) {
199 brw
->fragment_program
= brw
->attribs
.FragmentProgram
->_Current
;
200 brw
->state
.dirty
.mesa
|= _NEW_PROGRAM
;
201 brw
->state
.dirty
.brw
|= BRW_NEW_FRAGMENT_PROGRAM
;
/* Early-out when nothing is dirty (condition truncated in this view:
 * only the ->mesa test survives; the ->brw/->cache legs are missing).
 */
205 if (state
->mesa
== 0 &&
/* A new context invalidates anything cached against the old batch. */
210 if (brw
->state
.dirty
.brw
& BRW_NEW_CONTEXT
)
211 brw_clear_batch_cache_flush(brw
);
214 /* Make an early reference to the state pools, as we don't cope
215 * well with them being evicted from here down.
217 (void)bmBufferOffset(&brw
->intel
, brw
->pool
[BRW_GS_POOL
].buffer
);
218 (void)bmBufferOffset(&brw
->intel
, brw
->pool
[BRW_SS_POOL
].buffer
);
219 (void)bmBufferOffset(&brw
->intel
, brw
->intel
.batch
->buffer
);
222 /* Debug version which enforces various sanity checks on the
223 * state flags which are generated and checked to help ensure
224 * state atoms are ordered correctly in the list.
226 struct brw_state_flags examined
, prev
;
227 _mesa_memset(&examined
, 0, sizeof(examined
));
/* Debug walk: run each dirty atom, accumulate the dirty masks
 * examined so far, and assert that no later atom generates flags an
 * earlier atom already depended on (ordering violation).
 */
230 for (i
= 0; i
< brw
->state
.nr_atoms
; i
++) {
231 const struct brw_tracked_state
*atom
= brw
->state
.atoms
[i
];
232 struct brw_state_flags generated
;
234 assert(atom
->dirty
.mesa
||
237 assert(atom
->update
);
239 if (check_state(state
, &atom
->dirty
)) {
240 brw
->state
.atoms
[i
]->update( brw
);
245 accumulate_state(&examined
, &atom
->dirty
);
247 /* generated = (prev ^ state)
248 * if (examined & generated)
251 xor_states(&generated
, &prev
, state
);
252 assert(!check_state(&examined
, &generated
));
/* Non-debug path: just run every dirty atom, no ordering checks.
 * NOTE(review): this loop bounds on Elements(atoms) while the debug
 * loop uses brw->state.nr_atoms — the two should agree; verify.
 */
257 for (i
= 0; i
< Elements(atoms
); i
++) {
258 if (check_state(state
, &brw
->state
.atoms
[i
]->dirty
))
259 brw
->state
.atoms
[i
]->update( brw
);
/* All atoms have consumed the dirty bits; reset for the next pass. */
263 memset(state
, 0, sizeof(*state
));