2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **********************************************************************/
29 * Keith Whitwell <keith@tungstengraphics.com>
34 #include "brw_context.h"
35 #include "brw_state.h"
36 #include "intel_batchbuffer.h"
38 /* This is used to initialize brw->state.atoms[]. We could use this
39 * list directly except for a single atom, brw_constant_buffer, which
40 * has a .dirty value which changes according to the parameters of the
41 * current fragment and vertex programs, and so cannot be a static
44 const struct brw_tracked_state
*atoms
[] =
55 /* Once all the programs are done, we know how large urb entry
56 * sizes need to be and can decide if we need to change the urb
60 &brw_recalculate_urb_fence
,
66 &brw_wm_surfaces
, /* must do before samplers */
72 &brw_vs_unit
, /* always required, enabled or not */
79 &brw_state_base_address
,
81 &brw_binding_table_pointers
,
82 &brw_blend_constant_color
,
87 &brw_polygon_stipple_offset
,
90 &brw_aa_line_parameters
,
91 /* Ordering of the commands below is documented as fixed.
94 &brw_pipelined_state_pointers
,
96 &brw_constant_buffer_state
,
105 NULL
, /* brw_constant_buffer */
109 void brw_init_state( struct brw_context
*brw
)
115 brw
->state
.atoms
= _mesa_malloc(sizeof(atoms
));
116 brw
->state
.nr_atoms
= sizeof(atoms
)/sizeof(*atoms
);
117 _mesa_memcpy(brw
->state
.atoms
, atoms
, sizeof(atoms
));
119 /* Patch in a pointer to the dynamic state atom:
121 for (i
= 0; i
< brw
->state
.nr_atoms
; i
++)
122 if (brw
->state
.atoms
[i
] == NULL
)
123 brw
->state
.atoms
[i
] = &brw
->curbe
.tracked_state
;
125 _mesa_memcpy(&brw
->curbe
.tracked_state
,
126 &brw_constant_buffer
,
127 sizeof(brw_constant_buffer
));
131 void brw_destroy_state( struct brw_context
*brw
)
133 if (brw
->state
.atoms
) {
134 _mesa_free(brw
->state
.atoms
);
135 brw
->state
.atoms
= NULL
;
138 brw_destroy_cache(brw
);
139 brw_destroy_batch_cache(brw
);
/**********************************************************************/
145 static GLboolean
check_state( const struct brw_state_flags
*a
,
146 const struct brw_state_flags
*b
)
148 return ((a
->mesa
& b
->mesa
) ||
150 (a
->cache
& b
->cache
));
153 static void accumulate_state( struct brw_state_flags
*a
,
154 const struct brw_state_flags
*b
)
158 a
->cache
|= b
->cache
;
162 static void xor_states( struct brw_state_flags
*result
,
163 const struct brw_state_flags
*a
,
164 const struct brw_state_flags
*b
)
166 result
->mesa
= a
->mesa
^ b
->mesa
;
167 result
->brw
= a
->brw
^ b
->brw
;
168 result
->cache
= a
->cache
^ b
->cache
;
172 brw_clear_validated_bos(struct brw_context
*brw
)
176 /* Clear the last round of validated bos */
177 for (i
= 0; i
< brw
->state
.validated_bo_count
; i
++) {
178 dri_bo_unreference(brw
->state
.validated_bos
[i
]);
179 brw
->state
.validated_bos
[i
] = NULL
;
181 brw
->state
.validated_bo_count
= 0;
/**********************************************************************/
/* NOTE(review): this function is a mangled extraction — tokens are
 * split across lines and several statements, conditions and closing
 * braces fall on elided source lines (note the gaps in the leaked
 * line numbers).  The comments below annotate intent only; this text
 * is not compilable as-is and must be restored from the original file.
 */
/* Purpose: fold core-Mesa state changes into the driver dirty flags,
 * detect fragment/vertex program changes, then run the prepare stage
 * of every tracked-state atom whose dirty bits intersect the current
 * dirty set.
 */
187 void brw_validate_state( struct brw_context
*brw
)
189 struct intel_context
*intel
= &brw
->intel
;
190 struct brw_state_flags
*state
= &brw
->state
.dirty
;
/* Release BO references accumulated during the previous round. */
193 brw_clear_validated_bos(brw
);
/* Merge Mesa's accumulated _NEW_* bits into our dirty set, then clear
 * them so they are consumed exactly once.
 */
195 state
->mesa
|= brw
->intel
.NewGLState
;
196 brw
->intel
.NewGLState
= 0;
/* The batch buffer itself is always part of the validated set. */
198 brw_add_validated_bo(brw
, intel
->batch
->buf
);
/* Presumably forces every dirty bit on for debugging — body elided;
 * TODO confirm against the original.
 */
200 if (brw
->emit_state_always
) {
205 /* texenv program needs to notify us somehow when this happens:
206 * Some confusion about which state flag should represent this change.
/* Detect a fragment program swap and flag both the Mesa-level and
 * driver-level dirty bits.
 */
208 if (brw
->fragment_program
!= brw
->attribs
.FragmentProgram
->_Current
) {
209 brw
->fragment_program
= brw
->attribs
.FragmentProgram
->_Current
;
210 brw
->state
.dirty
.mesa
|= _NEW_PROGRAM
;
211 brw
->state
.dirty
.brw
|= BRW_NEW_FRAGMENT_PROGRAM
;
/* Same detection for the vertex program. */
214 if (brw
->vertex_program
!= brw
->attribs
.VertexProgram
->_Current
) {
215 brw
->vertex_program
= brw
->attribs
.VertexProgram
->_Current
;
216 brw
->state
.dirty
.brw
|= BRW_NEW_VERTEX_PROGRAM
;
/* Early-out when nothing is dirty — remainder of the condition
 * (presumably the ->brw and ->cache words) is on elided lines.
 */
219 if (state
->mesa
== 0 &&
/* A brand-new context invalidates any cached batch state. */
224 if (brw
->state
.dirty
.brw
& BRW_NEW_CONTEXT
)
225 brw_clear_batch_cache_flush(brw
);
/* Reset the fallback flag; atoms may set it during prepare. */
227 brw
->intel
.Fallback
= 0;
229 /* do prepare stage for all atoms */
230 for (i
= 0; i
< Elements(atoms
); i
++) {
231 const struct brw_tracked_state
*atom
= brw
->state
.atoms
[i
];
/* Stop preparing atoms once a fallback is flagged — loop-exit
 * statement elided.
 */
233 if (brw
->intel
.Fallback
)
/* Atom's prepare hook is presumably invoked inside this branch —
 * body elided; TODO confirm.
 */
236 if (check_state(state
, &atom
->dirty
)) {
/* NOTE(review): mangled extraction — statements, assert tails, loop
 * bodies and closing braces fall on elided source lines.  Comments
 * below annotate intent only; restore this function from the original
 * file before compiling.
 */
/* Purpose: run the emit stage of every atom whose dirty bits
 * intersect the current dirty set, with a debug mode that sanity-
 * checks atom ordering, then clear the dirty flags (unless a fallback
 * occurred, in which case they are kept for the software path).
 */
245 void brw_upload_state(struct brw_context
*brw
)
247 struct brw_state_flags
*state
= &brw
->state
.dirty
;
/* Release BO references from the previous validation round. */
250 brw_clear_validated_bos(brw
);
253 /* Debug version which enforces various sanity checks on the
254 * state flags which are generated and checked to help ensure
255 * state atoms are ordered correctly in the list.
257 struct brw_state_flags examined
, prev
;
258 _mesa_memset(&examined
, 0, sizeof(examined
));
/* Debug pass: walk every atom, accumulating which flags have been
 * examined so far so mis-ordered atoms can be detected.
 */
261 for (i
= 0; i
< brw
->state
.nr_atoms
; i
++) {
262 const struct brw_tracked_state
*atom
= brw
->state
.atoms
[i
];
263 struct brw_state_flags generated
;
/* Every atom must declare at least one dirty dependency — the rest
 * of this assert's condition (->brw / ->cache) is elided.
 */
265 assert(atom
->dirty
.mesa
||
/* Loop-exit on fallback — statement elided. */
269 if (brw
->intel
.Fallback
)
/* Atom fires: presumably emits state and records flags it produced —
 * body partially elided.
 */
272 if (check_state(state
, &atom
->dirty
)) {
278 accumulate_state(&examined
, &atom
->dirty
);
280 /* generated = (prev ^ state)
281 * if (examined & generated)
/* An atom must not generate flags that an earlier atom already
 * consumed; that would mean the list order is wrong.
 */
284 xor_states(&generated
, &prev
, state
);
285 assert(!check_state(&examined
, &generated
));
/* Non-debug pass: emit each atom whose dirty bits intersect. */
290 for (i
= 0; i
< Elements(atoms
); i
++) {
291 const struct brw_tracked_state
*atom
= brw
->state
.atoms
[i
];
/* Loop-exit on fallback — statement elided. */
293 if (brw
->intel
.Fallback
)
/* Atom's emit hook presumably invoked here — body elided. */
296 if (check_state(state
, &atom
->dirty
)) {
/* Clear the dirty flags only on success; keep them if we fell back
 * so the software path can re-examine them.
 */
304 if (!brw
->intel
.Fallback
)
305 memset(state
, 0, sizeof(*state
));