i965g: fix some asserts
[mesa.git] src/gallium/drivers/i965/brw_state_upload.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */


#include "brw_context.h"
#include "brw_state.h"
#include "brw_batchbuffer.h"
#include "brw_debug.h"
const struct brw_tracked_state *atoms[] =
{
   &brw_check_fallback,

/* &brw_wm_input_sizes, */
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_vp,
   &brw_cc_unit,

   &brw_vs_surfaces,          /* must do before unit */
   &brw_wm_constant_surface,  /* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,          /* must do before samplers and unit */
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,              /* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,
   &brw_polygon_stipple,
   &brw_line_stipple,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   &brw_curbe_buffer
};
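
/* Each entry in the table above follows the brw_tracked_state pattern:
 * a set of dirty flags plus optional prepare/emit callbacks.  The real
 * definition lives in brw_state.h; a rough sketch of the shape assumed
 * by the loops below (the callback return type here is an assumption,
 * inferred from how ret is used in this file):
 *
 *    struct brw_tracked_state {
 *       struct brw_state_flags dirty;
 *       enum pipe_error (*prepare)( struct brw_context *brw );
 *       enum pipe_error (*emit)( struct brw_context *brw );
 *    };
 *
 * An atom only runs when its dirty flags intersect the currently
 * accumulated flags -- see check_state() below.
 */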


void brw_init_state( struct brw_context *brw )
{
   brw_init_caches(brw);
}


void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
   brw_destroy_batch_cache(brw);
}

/***********************************************************************
 */

static GLboolean check_state( const struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   return ((a->mesa & b->mesa) ||
           (a->brw & b->brw) ||
           (a->cache & b->cache));
}

static void accumulate_state( struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   a->mesa |= b->mesa;
   a->brw |= b->brw;
   a->cache |= b->cache;
}


static void xor_states( struct brw_state_flags *result,
                        const struct brw_state_flags *a,
                        const struct brw_state_flags *b )
{
   result->mesa = a->mesa ^ b->mesa;
   result->brw = a->brw ^ b->brw;
   result->cache = a->cache ^ b->cache;
}

static void
brw_clear_validated_bos(struct brw_context *brw)
{
   int i;

   /* Clear the last round of validated bos */
   for (i = 0; i < brw->state.validated_bo_count; i++) {
      brw->sws->bo_unreference(brw->state.validated_bos[i]);
      brw->state.validated_bos[i] = NULL;
   }
   brw->state.validated_bo_count = 0;
}
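
/* The counterpart, brw_add_validated_bo() (declared in brw_state.h and
 * called from brw_validate_state() below), is assumed to take a
 * reference on the bo and append it to the array cleared above.  A
 * minimal sketch, assuming the winsys exposes bo_reference alongside
 * the bo_unreference used here:
 *
 *    assert(brw->state.validated_bo_count <
 *           Elements(brw->state.validated_bos));
 *    if (bo != NULL) {
 *       brw->sws->bo_reference(bo);
 *       brw->state.validated_bos[brw->state.validated_bo_count++] = bo;
 *    }
 */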

/***********************************************************************
 * Emit all state:
 */
enum pipe_error brw_validate_state( struct brw_context *brw )
{
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;
   int ret;

   brw_clear_validated_bos(brw);
   brw_add_validated_bo(brw, brw->batch->buf);

   if (brw->flags.always_emit_state) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   if (state->mesa == 0 &&
       state->cache == 0 &&
       state->brw == 0)
      return PIPE_OK;

   if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
      brw_clear_batch_cache(brw);

   /* Do the prepare stage for every atom whose dirty flags intersect
    * the current dirty state.
    */
   for (i = 0; i < Elements(atoms); i++) {
      const struct brw_tracked_state *atom = atoms[i];

      if (check_state(state, &atom->dirty)) {
         if (atom->prepare) {
            ret = atom->prepare(brw);
            if (ret)
               return ret;
         }
      }
   }

   /* Make sure that the textures which are referenced by the current
    * brw fragment program are actually present/valid.
    * If this fails, we can experience GPU lock-ups.
    */
   {
      const struct brw_fragment_shader *fp = brw->curr.fragment_shader;
      if (fp) {
         /* file_max[] records the highest sampler index the shader
          * references, so it must be strictly less than the number of
          * bound samplers; texture_max is checked inclusively against
          * the bound texture count.
          */
         assert(fp->info.file_max[TGSI_FILE_SAMPLER] < (int)brw->curr.num_samplers);
         assert(fp->info.texture_max <= brw->curr.num_textures);
      }
   }

   return PIPE_OK;
}
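
/* Expected call sequence from the draw path: validate first, so that
 * program compilation and other prepare work happens before any batch
 * space is committed, then emit via brw_upload_state().  A hedged
 * sketch; flush-and-retry policy on error is the caller's business:
 *
 *    ret = brw_validate_state(brw);
 *    if (ret == PIPE_OK)
 *       ret = brw_upload_state(brw);
 */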


enum pipe_error brw_upload_state(struct brw_context *brw)
{
   struct brw_state_flags *state = &brw->state.dirty;
   int ret;
   int i;

   brw_clear_validated_bos(brw);

   if (BRW_DEBUG) {
      /* Debug version which enforces various sanity checks on the
       * state flags that each atom generates, to help ensure the
       * atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < Elements(atoms); i++) {
         const struct brw_tracked_state *atom = atoms[i];
         struct brw_state_flags generated;

         assert(atom->dirty.mesa ||
                atom->dirty.brw ||
                atom->dirty.cache);

         if (check_state(state, &atom->dirty)) {
            if (atom->emit) {
               ret = atom->emit( brw );
               if (ret)
                  return ret;
            }
         }

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ state)
          * if (examined & generated)
          *    fail;
          */
         xor_states(&generated, &prev, state);
         assert(!check_state(&examined, &generated));
         prev = *state;
      }
   }
   else {
      for (i = 0; i < Elements(atoms); i++) {
         const struct brw_tracked_state *atom = atoms[i];

         if (check_state(state, &atom->dirty)) {
            if (atom->emit) {
               ret = atom->emit( brw );
               if (ret)
                  return ret;
            }
         }
      }
   }

   if (BRW_DEBUG & DEBUG_STATE) {
      brw_update_dirty_counts( state->mesa,
                               state->brw,
                               state->cache );
   }

   /* Clear dirty flags:
    */
   memset(state, 0, sizeof(*state));
   return PIPE_OK;
}