i965g: more work on compiling
[mesa.git] / src / gallium / drivers / i965 / brw_state_upload.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */


#include "brw_context.h"
#include "brw_state.h"
#include "brw_batchbuffer.h"

/* This is used to initialize brw->state.atoms[].  We could use this
 * list directly except for a single atom, brw_constant_buffer, which
 * has a .dirty value which changes according to the parameters of the
 * current fragment and vertex programs, and so cannot be a static
 * value.
 */
const struct brw_tracked_state *atoms[] =
{
   &brw_check_fallback,

/* &brw_wm_input_sizes, */
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_vp,
   &brw_cc_unit,

   &brw_vs_surfaces,          /* must do before unit */
   &brw_wm_constant_surface,  /* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,          /* must do before samplers and unit */
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,              /* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   &brw_constant_buffer
};
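
/* For reference (a sketch, not the authoritative definition, which lives
 * in the driver's state headers): each atom above is a brw_tracked_state
 * of roughly this shape, and the loops below drive its hooks.
 *
 *    struct brw_tracked_state {
 *       struct brw_state_flags dirty;   -- mesa/brw/cache bits that trigger it
 *       enum pipe_error (*prepare)( struct brw_context *brw );
 *       enum pipe_error (*emit)( struct brw_context *brw );
 *    };
 *
 * brw_validate_state() runs each atom's prepare() hook and
 * brw_upload_state() runs emit(), in both cases only when check_state()
 * finds an overlap between the context's dirty flags and atom->dirty.
 */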


void brw_init_state( struct brw_context *brw )
{
   brw_init_caches(brw);
}


void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
   brw_destroy_batch_cache(brw);
}

/***********************************************************************
 */

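/* Small helpers over the (mesa, brw, cache) dirty-bit sets: check_state()
 * tests for a non-empty intersection, accumulate_state() takes the union,
 * and xor_states() the symmetric difference.  The debug path in
 * brw_upload_state() uses them to detect an atom that consumes state bits
 * only generated by a later atom, i.e. an ordering bug in atoms[].
 */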
static GLboolean check_state( const struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   return ((a->mesa & b->mesa) ||
           (a->brw & b->brw) ||
           (a->cache & b->cache));
}

static void accumulate_state( struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   a->mesa |= b->mesa;
   a->brw |= b->brw;
   a->cache |= b->cache;
}


static void xor_states( struct brw_state_flags *result,
                        const struct brw_state_flags *a,
                        const struct brw_state_flags *b )
{
   result->mesa = a->mesa ^ b->mesa;
   result->brw = a->brw ^ b->brw;
   result->cache = a->cache ^ b->cache;
}

static void
brw_clear_validated_bos(struct brw_context *brw)
{
   int i;

   /* Clear the last round of validated bos */
   for (i = 0; i < brw->state.validated_bo_count; i++) {
      brw->sws->bo_unreference(brw->state.validated_bos[i]);
      brw->state.validated_bos[i] = NULL;
   }
   brw->state.validated_bo_count = 0;
}


/***********************************************************************
 * Emit all state:
 */
enum pipe_error brw_validate_state( struct brw_context *brw )
{
   struct brw_state_flags *state = &brw->state.dirty;
   enum pipe_error ret;
   GLuint i;

   brw_clear_validated_bos(brw);
   brw_add_validated_bo(brw, brw->batch->buf);

   if (brw->emit_state_always) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   if (state->mesa == 0 &&
       state->cache == 0 &&
       state->brw == 0)
      return 0;

   if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
      brw_clear_batch_cache(brw);

   /* do prepare stage for all atoms */
   for (i = 0; i < Elements(atoms); i++) {
      const struct brw_tracked_state *atom = atoms[i];

      if (check_state(state, &atom->dirty)) {
         if (atom->prepare) {
            ret = atom->prepare(brw);
            if (ret)
               return ret;
         }
      }
   }

   /* Make sure that the textures which are referenced by the current
    * brw fragment program are actually present/valid.
    * If this fails, we can experience GPU lock-ups.
    */
   {
      const struct brw_fragment_program *fp = brw->fragment_program;
      if (fp) {
         assert(fp->info.max_sampler <= brw->nr_samplers &&
                fp->info.max_texture <= brw->nr_textures);
      }
   }

   return 0;
}
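
/* Expected calling convention (a sketch of how the draw path is meant to
 * use these two entry points, not a copy of the actual caller):
 *
 *    ret = brw_validate_state(brw);
 *    if (ret != 0)
 *       return ret;
 *
 *    ret = brw_upload_state(brw);
 *    if (ret != 0)
 *       return ret;
 *
 * validate runs the prepare() hooks without touching the batchbuffer;
 * upload then emits the actual state packets and clears the dirty flags.
 */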


enum pipe_error brw_upload_state(struct brw_context *brw)
{
   struct brw_state_flags *state = &brw->state.dirty;
   enum pipe_error ret;
   int i;
   static int dirty_count = 0;

   brw_clear_validated_bos(brw);

   if (INTEL_DEBUG) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < Elements(atoms); i++) {
         const struct brw_tracked_state *atom = atoms[i];
         struct brw_state_flags generated;

         assert(atom->dirty.mesa ||
                atom->dirty.brw ||
                atom->dirty.cache);

         if (check_state(state, &atom->dirty)) {
            if (atom->emit) {
               ret = atom->emit( brw );
               if (ret)
                  return ret;
            }
         }

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ state)
          * if (examined & generated)
          *     fail;
          */
         xor_states(&generated, &prev, state);
         assert(!check_state(&examined, &generated));
         prev = *state;
      }
   }
   else {
      for (i = 0; i < Elements(atoms); i++) {
         const struct brw_tracked_state *atom = atoms[i];

         if (check_state(state, &atom->dirty)) {
            if (atom->emit) {
               ret = atom->emit( brw );
               if (ret)
                  return ret;
            }
         }
      }
   }

   if (INTEL_DEBUG & DEBUG_STATE) {
      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      brw_update_dirty_count(cache_bits, state->cache);
      if (dirty_count++ % 1000 == 0) {
         brw_print_dirty_count(mesa_bits, state->mesa);
         brw_print_dirty_count(brw_bits, state->brw);
         brw_print_dirty_count(cache_bits, state->cache);
         debug_printf("\n");
      }
   }

   /* Clear dirty flags:
    */
   memset(state, 0, sizeof(*state));

   return 0;
}