2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
26 **********************************************************************/
29 * Keith Whitwell <keith@tungstengraphics.com>
34 #include "brw_context.h"
35 #include "brw_state.h"
36 #include "brw_batchbuffer.h"
37 #include "brw_debug.h"
/* Ordered table of state atoms.  brw_validate_state()/brw_upload_state()
 * walk this list in order, invoking each atom's prepare()/emit() hook
 * when its declared dirty flags intersect the context's current dirty
 * state.  List order is significant — see the "must do before ..."
 * notes on individual entries.
 * NOTE(review): several entries of the original table are not visible
 * in this chunk (extraction dropped them); the sequence below is
 * incomplete and kept verbatim.
 */
39 const struct brw_tracked_state
*atoms
[] =
43 // &brw_wm_input_sizes,
50 /* Once all the programs are done, we know how large urb entry
51 * sizes need to be and can decide if we need to change the urb
55 &brw_recalculate_urb_fence
,
60 &brw_vs_surfaces
, /* must do before unit */
61 &brw_wm_constant_surface
, /* must do before wm surfaces/bind bo */
62 &brw_wm_surfaces
, /* must do before samplers and unit */
68 &brw_vs_unit
, /* always required, enabled or not */
75 &brw_state_base_address
,
77 &brw_binding_table_pointers
,
78 &brw_blend_constant_color
,
/* One-time per-context initialization of the state-tracking machinery.
 * NOTE(review): the function body is not visible in this chunk of the
 * file (extraction dropped it) — do not infer what it initializes
 * beyond taking the context pointer.  Presumably sets up the caches
 * torn down by brw_destroy_state(); confirm against the full source.
 */
95 void brw_init_state( struct brw_context
*brw
)
/* Tear down all per-context derived-state tracking.
 *
 * First drops the buffer-object references still held in the
 * validated-bo list — brw_clear_validated_bos() is what releases those
 * references after each emission round, and skipping it on the destroy
 * path would leak every bo validated in the final round — then destroys
 * the state and batch caches.
 */
void brw_destroy_state( struct brw_context *brw )
{
   brw_clear_validated_bos(brw);

   brw_destroy_caches(brw);
   brw_destroy_batch_cache(brw);
}
107 /***********************************************************************
110 static GLboolean
check_state( const struct brw_state_flags
*a
,
111 const struct brw_state_flags
*b
)
113 return ((a
->mesa
& b
->mesa
) ||
115 (a
->cache
& b
->cache
));
118 static void accumulate_state( struct brw_state_flags
*a
,
119 const struct brw_state_flags
*b
)
123 a
->cache
|= b
->cache
;
127 static void xor_states( struct brw_state_flags
*result
,
128 const struct brw_state_flags
*a
,
129 const struct brw_state_flags
*b
)
131 result
->mesa
= a
->mesa
^ b
->mesa
;
132 result
->brw
= a
->brw
^ b
->brw
;
133 result
->cache
= a
->cache
^ b
->cache
;
137 brw_clear_validated_bos(struct brw_context
*brw
)
141 /* Clear the last round of validated bos */
142 for (i
= 0; i
< brw
->state
.validated_bo_count
; i
++) {
143 brw
->sws
->bo_unreference(brw
->state
.validated_bos
[i
]);
144 brw
->state
.validated_bos
[i
] = NULL
;
146 brw
->state
.validated_bo_count
= 0;
150 /***********************************************************************
/* Run the "prepare" phase over the atom list.
 *
 * Clears last round's validated-bo list, re-adds the batchbuffer, and
 * for every atom whose dirty mask intersects the context's dirty flags
 * calls atom->prepare().  Finishes with sanity asserts that the bound
 * fragment shader's sampler/texture references are within the bound
 * counts (out-of-range references can cause GPU lock-ups, per the
 * original comment).
 *
 * NOTE(review): many statements of this function were lost in
 * extraction (the early-return condition is truncated, error handling
 * on prepare() and closing braces are missing).  Fragments below are
 * kept verbatim; do not infer the missing control flow from here.
 */
153 enum pipe_error
brw_validate_state( struct brw_context
*brw
)
/* Shorthand for the context's accumulated dirty flags. */
155 struct brw_state_flags
*state
= &brw
->state
.dirty
;
/* Drop bo references validated last round, then pin the batchbuffer. */
159 brw_clear_validated_bos(brw
);
160 brw_add_validated_bo(brw
, brw
->batch
->buf
);
/* Debug override: force-emit all state regardless of dirty bits. */
162 if (brw
->flags
.always_emit_state
) {
/* Early-out when nothing is dirty (rest of condition lost in chunk). */
168 if (state
->mesa
== 0 &&
/* A brand-new context invalidates the batch cache entirely. */
173 if (brw
->state
.dirty
.brw
& BRW_NEW_CONTEXT
)
174 brw_clear_batch_cache(brw
);
176 /* do prepare stage for all atoms */
177 for (i
= 0; i
< Elements(atoms
); i
++) {
178 const struct brw_tracked_state
*atom
= atoms
[i
];
/* Only atoms whose dirty mask intersects the current flags run. */
180 if (check_state(state
, &atom
->dirty
)) {
182 ret
= atom
->prepare(brw
);
189 /* Make sure that the textures which are referenced by the current
190 * brw fragment program are actually present/valid.
191 * If this fails, we can experience GPU lock-ups.
194 const struct brw_fragment_shader
*fp
= brw
->curr
.fragment_shader
;
/* Highest sampler index referenced must be below the bound count. */
196 assert(fp
->info
.file_max
[TGSI_FILE_SAMPLER
] < (int)brw
->curr
.num_samplers
);
197 assert(fp
->info
.texture_max
<= brw
->curr
.num_textures
);
/* Run the "emit" phase over the atom list.
 *
 * Debug build: as each atom emits, accumulate_state() records the
 * dirty bits it consumed, and xor_states() computes the bits its
 * emit() changed; the assert that the two sets never intersect proves
 * no later atom depends on state an earlier atom already consumed —
 * i.e. that the atom list is ordered correctly.  Non-debug build:
 * plain emit loop.  All dirty flags are cleared at the end.
 *
 * NOTE(review): loop/if braces, the `prev` snapshotting, and the error
 * handling around atom->emit() were lost in extraction.  Fragments
 * below are kept verbatim.
 */
205 enum pipe_error
brw_upload_state(struct brw_context
*brw
)
207 struct brw_state_flags
*state
= &brw
->state
.dirty
;
/* Release bo references taken during the prepare phase's round. */
211 brw_clear_validated_bos(brw
);
214 /* Debug version which enforces various sanity checks on the
215 * state flags which are generated and checked to help ensure
216 * state atoms are ordered correctly in the list.
218 struct brw_state_flags examined
, prev
;
219 memset(&examined
, 0, sizeof(examined
));
/* Walk the atoms in declared order. */
222 for (i
= 0; i
< Elements(atoms
); i
++) {
223 const struct brw_tracked_state
*atom
= atoms
[i
];
224 struct brw_state_flags generated
;
/* Every atom must listen to at least one dirty bit (assert truncated). */
226 assert(atom
->dirty
.mesa
||
230 if (check_state(state
, &atom
->dirty
)) {
232 ret
= atom
->emit( brw
);
/* Record which dirty bits have been consumed so far. */
238 accumulate_state(&examined
, &atom
->dirty
);
240 /* generated = (prev ^ state)
241 * if (examined & generated)
/* Bits an emit() just changed must not already have been examined. */
244 xor_states(&generated
, &prev
, state
);
245 assert(!check_state(&examined
, &generated
));
/* Non-debug path: plain emit loop, no ordering checks. */
250 for (i
= 0; i
< Elements(atoms
); i
++) {
251 const struct brw_tracked_state
*atom
= atoms
[i
];
253 if (check_state(state
, &atom
->dirty
)) {
255 ret
= atom
->emit( brw
);
/* Optional statistics on which dirty bits fired. */
263 if (BRW_DEBUG
& DEBUG_STATE
) {
264 brw_update_dirty_counts( state
->mesa
,
269 /* Clear dirty flags:
271 memset(state
, 0, sizeof(*state
));