/**********************************************************************
 * Copyright (C) Intel Corp. 2006. All Rights Reserved.
 * Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 * develop this 3D driver.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 **********************************************************************/

/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */
34 #include "brw_context.h"
35 #include "brw_state.h"
36 #include "brw_batchbuffer.h"
37 #include "brw_debug.h"
/* Ordered table of state atoms: each entry is emitted when its dirty
 * flags intersect the context's dirty flags (see check_state()).
 * The "must do before ..." notes show ordering constraints between
 * entries.
 *
 * NOTE(review): the fused line numbers jump (39, 43, 50, 55, 60, ...),
 * so this extract is missing the array's braces and many entries —
 * confirm the full list against the upstream file before editing.
 */
39 const struct brw_tracked_state
*atoms
[] =
/* Commented-out atom kept by the original author; do not re-enable
 * without checking why it was disabled.
 */
43 // &brw_wm_input_sizes,
50 /* Once all the programs are done, we know how large urb entry
51 * sizes need to be and can decide if we need to change the urb
55 &brw_recalculate_urb_fence
,
/* Surface-state atoms: ordering constraints are noted inline. */
60 &brw_vs_surfaces
, /* must do before unit */
61 &brw_wm_constant_surface
, /* must do before wm surfaces/bind bo */
62 &brw_wm_surfaces
, /* must do before samplers and unit */
68 &brw_vs_unit
, /* always required, enabled or not */
75 &brw_state_base_address
,
77 &brw_binding_table_pointers
,
78 &brw_blend_constant_color
,
/* One-time state-module initialization for a context.
 *
 * NOTE(review): the body was elided by the extraction (fused numbers
 * jump from 95 to 101); presumably it initializes the state caches —
 * confirm against the upstream file.
 */
95 void brw_init_state( struct brw_context
*brw
)
/* Tear down all state-module storage owned by the context: the state
 * object caches and the batch cache.  Counterpart to brw_init_state().
 */
void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
   brw_destroy_batch_cache(brw);
}
107 /***********************************************************************
110 static GLboolean
check_state( const struct brw_state_flags
*a
,
111 const struct brw_state_flags
*b
)
113 return ((a
->mesa
& b
->mesa
) ||
115 (a
->cache
& b
->cache
));
118 static void accumulate_state( struct brw_state_flags
*a
,
119 const struct brw_state_flags
*b
)
123 a
->cache
|= b
->cache
;
127 static void xor_states( struct brw_state_flags
*result
,
128 const struct brw_state_flags
*a
,
129 const struct brw_state_flags
*b
)
131 result
->mesa
= a
->mesa
^ b
->mesa
;
132 result
->brw
= a
->brw
^ b
->brw
;
133 result
->cache
= a
->cache
^ b
->cache
;
137 brw_clear_validated_bos(struct brw_context
*brw
)
141 /* Clear the last round of validated bos */
142 for (i
= 0; i
< brw
->state
.validated_bo_count
; i
++) {
143 bo_reference(&brw
->state
.validated_bos
[i
], NULL
);
145 brw
->state
.validated_bo_count
= 0;
149 /***********************************************************************
/* Run the "prepare" phase for every state atom whose dirty flags
 * intersect the context's accumulated dirty flags, then sanity-check
 * the bound fragment shader's sampler/texture usage.
 *
 * NOTE(review): this extract has large gaps (fused line numbers jump,
 * braces/returns/error handling elided).  Comments below describe only
 * what is visible; confirm the full body against the upstream file
 * before modifying.
 */
152 enum pipe_error
brw_validate_state( struct brw_context
*brw
)
/* Shorthand pointer to the context's dirty-flag set. */
154 struct brw_state_flags
*state
= &brw
->state
.dirty
;
/* Drop last round's validated buffers, then validate the batch buffer
 * itself so it is always in the validation list.
 */
158 brw_clear_validated_bos(brw
);
159 brw_add_validated_bo(brw
, brw
->batch
->buf
);
/* Debug override: when always_emit_state is set, presumably all dirty
 * flags are forced on — body elided here, TODO confirm.
 */
161 if (brw
->flags
.always_emit_state
) {
/* Early-out when nothing is dirty; the remaining terms of the
 * condition (brw/cache words, presumably) were elided.
 */
167 if (state
->mesa
== 0 &&
/* A brand-new context invalidates any cached batch state. */
172 if (brw
->state
.dirty
.brw
& BRW_NEW_CONTEXT
)
173 brw_clear_batch_cache(brw
);
175 /* do prepare stage for all atoms */
176 for (i
= 0; i
< Elements(atoms
); i
++) {
177 const struct brw_tracked_state
*atom
= atoms
[i
];
/* Only atoms whose dirty bits overlap the current flags run. */
179 if (check_state(state
, &atom
->dirty
)) {
/* prepare() may fail; the error-propagation lines were elided —
 * presumably `ret` is checked and returned. TODO confirm.
 */
181 ret
= atom
->prepare(brw
);
188 /* Make sure that the textures which are referenced by the current
189 * brw fragment program are actually present/valid.
190 * If this fails, we can experience GPU lock-ups.
193 const struct brw_fragment_shader
*fp
= brw
->curr
.fragment_shader
;
/* Highest sampler index used by the shader must fall within the
 * number of bound samplers/textures.
 */
195 assert(fp
->info
.file_max
[TGSI_FILE_SAMPLER
] < (int)brw
->curr
.num_samplers
);
196 assert(fp
->info
.texture_max
<= brw
->curr
.num_textures
);
/* Run the "emit" phase for every dirty state atom, then clear the
 * dirty flags.  A debug build additionally verifies that no atom
 * dirties flags that an earlier atom already examined (i.e. that the
 * atom list is correctly ordered).
 *
 * NOTE(review): this extract has large gaps (loop bodies, error
 * handling, the #if/#else split between debug and release paths are
 * partially elided).  Comments describe only what is visible; confirm
 * against the upstream file before modifying.
 */
204 enum pipe_error
brw_upload_state(struct brw_context
*brw
)
/* Shorthand pointer to the context's dirty-flag set. */
206 struct brw_state_flags
*state
= &brw
->state
.dirty
;
/* Forget the buffers validated in the previous round. */
210 brw_clear_validated_bos(brw
);
213 /* Debug version which enforces various sanity checks on the
214 * state flags which are generated and checked to help ensure
215 * state atoms are ordered correctly in the list.
/* `examined` accumulates the dirty bits already consulted; `prev`
 * presumably snapshots *state before each emit — TODO confirm, its
 * initialization was elided.
 */
217 struct brw_state_flags examined
, prev
;
218 memset(&examined
, 0, sizeof(examined
));
/* Debug path: emit each dirty atom while tracking flag usage. */
221 for (i
= 0; i
< Elements(atoms
); i
++) {
222 const struct brw_tracked_state
*atom
= atoms
[i
];
223 struct brw_state_flags generated
;
/* Every atom must declare at least one dirty bit (condition
 * truncated — the brw/cache terms were elided).
 */
225 assert(atom
->dirty
.mesa
||
229 if (check_state(state
, &atom
->dirty
)) {
/* emit() may fail; the error-propagation lines were elided. */
231 ret
= atom
->emit( brw
);
/* Record which flags this atom consulted. */
237 accumulate_state(&examined
, &atom
->dirty
);
239 /* generated = (prev ^ state)
240 * if (examined & generated)
/* Flags that changed during this atom's emit must not have been
 * examined by an earlier atom — otherwise the list is mis-ordered.
 */
243 xor_states(&generated
, &prev
, state
);
244 assert(!check_state(&examined
, &generated
));
/* Release path: plain emit loop without the ordering checks. */
249 for (i
= 0; i
< Elements(atoms
); i
++) {
250 const struct brw_tracked_state
*atom
= atoms
[i
];
252 if (check_state(state
, &atom
->dirty
)) {
254 ret
= atom
->emit( brw
);
/* Optional statistics dump when state debugging is enabled
 * (argument list truncated in this extract).
 */
262 if (BRW_DEBUG
& DEBUG_STATE
) {
263 brw_update_dirty_counts( state
->mesa
,
268 /* Clear dirty flags:
270 memset(state
, 0, sizeof(*state
));