i965g: add lots of error checks and early returns
[mesa.git] src/gallium/drivers/i965/brw_state_upload.c
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */


#include "brw_context.h"
#include "brw_state.h"
#include "brw_batchbuffer.h"
#include "brw_debug.h"

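/* The tracked-state atoms, listed in the order their state is
 * prepared and emitted.  The debug path in brw_upload_state() asserts
 * that no atom consumes state which is only generated by a later atom
 * in this list.
 */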
const struct brw_tracked_state *atoms[] =
{
   &brw_check_fallback,

// &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_vp,
   &brw_cc_unit,

   &brw_vs_surfaces,            /* must do before unit */
   &brw_wm_constant_surface,    /* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,            /* must do before samplers and unit */
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,                /* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,
   &brw_polygon_stipple,
   &brw_line_stipple,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   &brw_curbe_buffer
};

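/* One-time initialization of the state machinery: currently just the
 * program/state caches.
 */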
void brw_init_state( struct brw_context *brw )
{
   brw_init_caches(brw);
}


void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
   brw_destroy_batch_cache(brw);
}

/***********************************************************************
 * Dirty-flag bookkeeping helpers:
 */

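/* Return TRUE if any of the three dirty-flag words of 'a' and 'b'
 * intersect.
 */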
static GLboolean check_state( const struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   return ((a->mesa & b->mesa) ||
           (a->brw & b->brw) ||
           (a->cache & b->cache));
}

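/* Accumulate the dirty flags of 'b' into 'a' (a |= b).
 */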
static void accumulate_state( struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   a->mesa |= b->mesa;
   a->brw |= b->brw;
   a->cache |= b->cache;
}

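/* Compute the flags that differ between 'a' and 'b' (result = a ^ b).
 * Used by the debug path below to spot state generated while the
 * atoms were being emitted.
 */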
static void xor_states( struct brw_state_flags *result,
                        const struct brw_state_flags *a,
                        const struct brw_state_flags *b )
{
   result->mesa = a->mesa ^ b->mesa;
   result->brw = a->brw ^ b->brw;
   result->cache = a->cache ^ b->cache;
}

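/* Release the references taken on the buffer objects validated during
 * the previous round and reset the list.
 */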
static void
brw_clear_validated_bos(struct brw_context *brw)
{
   int i;

   /* Clear the last round of validated bos */
   for (i = 0; i < brw->state.validated_bo_count; i++) {
      bo_reference(&brw->state.validated_bos[i], NULL);
   }
   brw->state.validated_bo_count = 0;
}


/***********************************************************************
 * Prepare and emit all state:
 */
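/* Prepare stage: run the prepare() callback of every atom whose dirty
 * bits intersect the current dirty state, so that programs, surfaces,
 * etc. exist before anything is emitted.  Returns early when nothing
 * is dirty; any error from an atom is propagated to the caller.
 */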
enum pipe_error brw_validate_state( struct brw_context *brw )
{
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;
   int ret;

   brw_clear_validated_bos(brw);
   brw_add_validated_bo(brw, brw->batch->buf);

   if (brw->flags.always_emit_state) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   if (state->mesa == 0 &&
       state->cache == 0 &&
       state->brw == 0)
      return 0;

   if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
      brw_clear_batch_cache(brw);

   /* do prepare stage for all atoms */
   for (i = 0; i < Elements(atoms); i++) {
      const struct brw_tracked_state *atom = atoms[i];

      if (check_state(state, &atom->dirty)) {
         if (atom->prepare) {
            ret = atom->prepare(brw);
            if (ret)
               return ret;
         }
      }
   }

   /* Make sure that the textures which are referenced by the current
    * brw fragment program are actually present/valid.
    * If this fails, we can experience GPU lock-ups.
    */
   {
      const struct brw_fragment_shader *fp = brw->curr.fragment_shader;
      if (fp) {
         assert(fp->info.file_max[TGSI_FILE_SAMPLER] < (int)brw->curr.num_samplers);
         assert(fp->info.texture_max <= brw->curr.num_textures);
      }
   }

   return 0;
}

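/* Emit stage: call the emit() callback of every dirty atom, then clear
 * the dirty flags.  Expected to run after brw_validate_state() has
 * succeeded.  In debug builds this also enforces that the atom list is
 * ordered correctly (see the comment above atoms[]).
 */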
enum pipe_error brw_upload_state(struct brw_context *brw)
{
   struct brw_state_flags *state = &brw->state.dirty;
   int ret;
   int i;

   brw_clear_validated_bos(brw);

   if (BRW_DEBUG) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < Elements(atoms); i++) {
         const struct brw_tracked_state *atom = atoms[i];
         struct brw_state_flags generated;

         assert(atom->dirty.mesa ||
                atom->dirty.brw ||
                atom->dirty.cache);

         if (check_state(state, &atom->dirty)) {
            if (atom->emit) {
               ret = atom->emit( brw );
               if (ret)
                  return ret;
            }
         }

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ state)
          * if (examined & generated)
          *    fail;
          */
         xor_states(&generated, &prev, state);
         assert(!check_state(&examined, &generated));
         prev = *state;
      }
   }
   else {
      for (i = 0; i < Elements(atoms); i++) {
         const struct brw_tracked_state *atom = atoms[i];

         if (check_state(state, &atom->dirty)) {
            if (atom->emit) {
               ret = atom->emit( brw );
               if (ret)
                  return ret;
            }
         }
      }
   }

   if (BRW_DEBUG & DEBUG_STATE) {
      brw_update_dirty_counts( state->mesa,
                               state->brw,
                               state->cache );
   }

   /* Clear dirty flags:
    */
   memset(state, 0, sizeof(*state));
   return 0;
}