/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include "brw_context.h"
#include "brw_state.h"
#include "brw_batchbuffer.h"
#include "brw_debug.h"

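/* Each atom in this list is a struct brw_tracked_state: a set of dirty
 * flags plus optional prepare/emit hooks.  As a sketch only (see
 * brw_state.h for the authoritative definition; the hook signatures
 * here are assumptions):
 *
 *    struct brw_tracked_state {
 *       struct brw_state_flags dirty;   - bits that trigger this atom
 *       enum pipe_error (*prepare)( struct brw_context *brw );
 *       enum pipe_error (*emit)( struct brw_context *brw );
 *    };
 *
 * brw_validate_state() below runs the prepare hook of every atom whose
 * dirty bits overlap the currently dirty state; brw_upload_state()
 * then runs the matching emit hooks in list order.
 */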
const struct brw_tracked_state *atoms[] =
{
   /* &brw_wm_input_sizes, */
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large the URB entries
    * need to be and can decide whether the URB layout has to change.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_vp,
   &brw_cc_unit,

   &brw_vs_surfaces,            /* must do before unit */
   /*&brw_wm_constant_surface,*/ /* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,            /* must do before samplers and unit */
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,                /* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,
   &brw_polygon_stipple,
   &brw_line_stipple,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   &brw_curbe_buffer
};

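/* One-time, per-context initialization of the state-upload machinery:
 * currently this just sets up the caches used by the atoms above.
 */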
void brw_init_state( struct brw_context *brw )
{
   brw_init_caches(brw);
}

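/* Tear down the caches built up by brw_init_state() and by batch
 * emission.
 */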
void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
   brw_destroy_batch_cache(brw);
}

/***********************************************************************
 */

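/* Return true (nonzero) if any dirty bit in 'a' is also set in 'b',
 * i.e. whether the accumulated state change 'a' is relevant to an atom
 * whose sensitivity is described by 'b'.
 */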
static GLboolean check_state( const struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   return ((a->mesa & b->mesa) ||
           (a->brw & b->brw) ||
           (a->cache & b->cache));
}

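/* OR all of 'b's dirty flags into 'a'. */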
static void accumulate_state( struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   a->mesa |= b->mesa;
   a->brw |= b->brw;
   a->cache |= b->cache;
}

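/* result = a ^ b for each flag word: the bits that changed between two
 * snapshots of the dirty state.
 */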
static void xor_states( struct brw_state_flags *result,
                        const struct brw_state_flags *a,
                        const struct brw_state_flags *b )
{
   result->mesa = a->mesa ^ b->mesa;
   result->brw = a->brw ^ b->brw;
   result->cache = a->cache ^ b->cache;
}

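/* Release the references taken on the buffers validated during the
 * previous brw_validate_state() pass.
 */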
static void
brw_clear_validated_bos(struct brw_context *brw)
{
   int i;

   /* Clear the last round of validated bos */
   for (i = 0; i < brw->state.validated_bo_count; i++) {
      bo_reference(&brw->state.validated_bos[i], NULL);
   }
   brw->state.validated_bo_count = 0;
}

/***********************************************************************
 * Emit all state:
 */
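/* First of the two per-draw passes: run the prepare hook of every atom
 * whose dirty bits intersect the current dirty state, so that the
 * buffer objects the atoms depend on are set up and validated before
 * any commands are written to the batch.
 */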
enum pipe_error brw_validate_state( struct brw_context *brw )
{
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;
   int ret;

   brw_clear_validated_bos(brw);
   brw_add_validated_bo(brw, brw->batch->buf);

   if (brw->flags.always_emit_state) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   if (state->mesa == 0 &&
       state->cache == 0 &&
       state->brw == 0)
      return 0;

   if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
      brw_clear_batch_cache(brw);

   /* Do the prepare stage for all dirty atoms:
    */
   for (i = 0; i < Elements(atoms); i++) {
      const struct brw_tracked_state *atom = atoms[i];

      if (check_state(state, &atom->dirty)) {
         if (atom->prepare) {
            ret = atom->prepare(brw);
            if (ret)
               return ret;
         }
      }
   }

   /* Make sure that the textures referenced by the current fragment
    * program are actually present/valid.  If this check fails, we can
    * experience GPU lock-ups.
    */
   {
      const struct brw_fragment_shader *fp = brw->curr.fragment_shader;
      if (fp) {
         assert(fp->info.file_max[TGSI_FILE_SAMPLER] < (int)brw->curr.num_samplers);
         /*assert(fp->info.texture_max <= brw->curr.num_textures);*/
      }
   }

   return 0;
}

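/* Second pass: run the emit hook of every dirty atom, writing the
 * actual commands and indirect state into the batchbuffer, then clear
 * all dirty flags.  In debug builds this also cross-checks that no
 * atom consumes state generated by a later atom (a list-ordering bug).
 */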
enum pipe_error brw_upload_state(struct brw_context *brw)
{
   struct brw_state_flags *state = &brw->state.dirty;
   int ret;
   int i;

   brw_clear_validated_bos(brw);

   if (BRW_DEBUG) {
      /* Debug version: additionally sanity-check the dirty flags each
       * atom generates against the flags already examined, to help
       * ensure the state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < Elements(atoms); i++) {
         const struct brw_tracked_state *atom = atoms[i];
         struct brw_state_flags generated;

         assert(atom->dirty.mesa ||
                atom->dirty.brw ||
                atom->dirty.cache);

         if (check_state(state, &atom->dirty)) {
            if (atom->emit) {
               ret = atom->emit( brw );
               if (ret)
                  return ret;
            }
         }

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ state)
          * if (examined & generated)
          *     fail;
          */
         xor_states(&generated, &prev, state);
         assert(!check_state(&examined, &generated));
         prev = *state;
      }
   }
   else {
      for (i = 0; i < Elements(atoms); i++) {
         const struct brw_tracked_state *atom = atoms[i];

         if (check_state(state, &atom->dirty)) {
            if (atom->emit) {
               ret = atom->emit( brw );
               if (ret)
                  return ret;
            }
         }
      }
   }

   if (BRW_DEBUG & DEBUG_STATE) {
      brw_update_dirty_counts( state->mesa,
                               state->brw,
                               state->cache );
   }

   /* Clear dirty flags:
    */
   memset(state, 0, sizeof(*state));
   return 0;
}