i965g: still working on compilation
[mesa.git] / src / gallium / drivers / i965 / brw_state_upload.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include "brw_context.h"
#include "brw_state.h"
#include "brw_batchbuffer.h"
#include "brw_debug.h"

/* This list is used to initialize brw->state.atoms[].  It could be
 * used directly except for a single atom, brw_constant_buffer, whose
 * .dirty value changes according to the parameters of the current
 * fragment and vertex programs, and so cannot be a static value.
 */
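/* For reference, each atom has roughly this shape (a sketch inferred
 * from how the atoms are used below; the authoritative definition is
 * in brw_state.h):
 *
 *    struct brw_tracked_state {
 *       struct brw_state_flags dirty;
 *       int (*prepare)( struct brw_context *brw );
 *       int (*emit)( struct brw_context *brw );
 *    };
 *
 * An atom runs (prepare() in brw_validate_state(), emit() in
 * brw_upload_state()) whenever its dirty flags intersect the current
 * dirty state.
 */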
const struct brw_tracked_state *atoms[] =
{
   &brw_check_fallback,

// &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_vp,
   &brw_cc_unit,

   &brw_vs_surfaces,          /* must do before unit */
   &brw_wm_constant_surface,  /* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,          /* must do before samplers and unit */
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,              /* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,
   &brw_polygon_stipple,
   &brw_line_stipple,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   &brw_constant_buffer
};
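/* For illustration, an individual atom definition looks roughly like
 * this (a hypothetical example: the real atoms live in the other
 * brw_*.c files, and the exact dirty-flag names used here are
 * assumptions):
 *
 *    static int upload_blend_constant_color( struct brw_context *brw )
 *    {
 *       ... emit the blend-constant-color packet into the batch ...
 *    }
 *
 *    const struct brw_tracked_state brw_blend_constant_color = {
 *       .dirty = {
 *          .mesa = PIPE_NEW_BLEND_COLOR,
 *          .brw = BRW_NEW_CONTEXT,
 *          .cache = 0
 *       },
 *       .emit = upload_blend_constant_color
 *    };
 */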

void brw_init_state( struct brw_context *brw )
{
   brw_init_caches(brw);
}


void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
   brw_destroy_batch_cache(brw);
}

/***********************************************************************
 * Dirty-flag helpers:
 */

/* Return TRUE if any of the three dirty-flag words of a and b
 * intersect.
 */
static GLboolean check_state( const struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   return ((a->mesa & b->mesa) ||
           (a->brw & b->brw) ||
           (a->cache & b->cache));
}

/* Accumulate the dirty flags of b into a.
 */
static void accumulate_state( struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   a->mesa |= b->mesa;
   a->brw |= b->brw;
   a->cache |= b->cache;
}

/* Compute the dirty flags that differ between a and b.
 */
static void xor_states( struct brw_state_flags *result,
                        const struct brw_state_flags *a,
                        const struct brw_state_flags *b )
{
   result->mesa = a->mesa ^ b->mesa;
   result->brw = a->brw ^ b->brw;
   result->cache = a->cache ^ b->cache;
}
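/* Worked example of how these helpers drive the sanity check in
 * brw_upload_state() below (BRW_NEW_URB_FENCE is just an illustrative
 * flag): suppose atom[2]'s emit sets BRW_NEW_URB_FENCE after atom[0],
 * which listens for BRW_NEW_URB_FENCE, has already run.  Then:
 *
 *    xor_states(&generated, &prev, state)  -> generated contains the flag
 *    examined (accumulated atom[0..2].dirty) also contains the flag
 *    check_state(&examined, &generated)    -> true, assertion fails
 *
 * i.e. an atom must not dirty state that an earlier atom in the list
 * depends on.
 */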

static void
brw_clear_validated_bos(struct brw_context *brw)
{
   int i;

   /* Clear the last round of validated bos */
   for (i = 0; i < brw->state.validated_bo_count; i++) {
      brw->sws->bo_unreference(brw->state.validated_bos[i]);
      brw->state.validated_bos[i] = NULL;
   }
   brw->state.validated_bo_count = 0;
}
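/* The counterpart that fills validated_bos[] is brw_add_validated_bo(),
 * declared in brw_state.h and called below.  A sketch of the assumed
 * behaviour (the exact winsys reference call is an assumption, chosen
 * to mirror the bo_unreference above):
 *
 *    void brw_add_validated_bo( struct brw_context *brw,
 *                               struct brw_winsys_buffer *bo )
 *    {
 *       if (bo) {
 *          brw->sws->bo_reference(bo);
 *          brw->state.validated_bos[brw->state.validated_bo_count++] = bo;
 *       }
 *    }
 */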


/***********************************************************************
 * Emit all state:
 */
enum pipe_error brw_validate_state( struct brw_context *brw )
{
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;
   int ret;

   brw_clear_validated_bos(brw);
   brw_add_validated_bo(brw, brw->batch->buf);

   if (brw->flags.always_emit_state) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   if (state->mesa == 0 &&
       state->cache == 0 &&
       state->brw == 0)
      return PIPE_OK;

   if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
      brw_clear_batch_cache(brw);

   /* Do the prepare stage for every atom whose dirty flags intersect
    * the current dirty state.
    */
   for (i = 0; i < Elements(atoms); i++) {
      const struct brw_tracked_state *atom = atoms[i];

      if (check_state(state, &atom->dirty)) {
         if (atom->prepare) {
            ret = atom->prepare(brw);
            if (ret)
               return ret;
         }
      }
   }

   /* Make sure that the textures which are referenced by the current
    * brw fragment program are actually present/valid.  If this fails,
    * we can experience GPU lock-ups.
    */
   {
      const struct brw_fragment_shader *fp = brw->curr.fragment_shader;
      if (fp) {
         assert(fp->info.file_max[TGSI_FILE_SAMPLER] < brw->curr.num_samplers &&
                fp->info.texture_max < brw->curr.num_textures);
      }
   }

   return PIPE_OK;
}


enum pipe_error brw_upload_state(struct brw_context *brw)
{
   struct brw_state_flags *state = &brw->state.dirty;
   int ret;
   int i;

   brw_clear_validated_bos(brw);

   if (BRW_DEBUG) {
      /* Debug version which enforces various sanity checks on the
       * state flags that each atom generates, to help ensure that the
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < Elements(atoms); i++) {
         const struct brw_tracked_state *atom = atoms[i];
         struct brw_state_flags generated;

         assert(atom->dirty.mesa ||
                atom->dirty.brw ||
                atom->dirty.cache);

         if (check_state(state, &atom->dirty)) {
            if (atom->emit) {
               ret = atom->emit( brw );
               if (ret)
                  return ret;
            }
         }

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ state)
          * if (examined & generated)
          *    fail;
          */
         xor_states(&generated, &prev, state);
         assert(!check_state(&examined, &generated));
         prev = *state;
      }
   }
   else {
      for (i = 0; i < Elements(atoms); i++) {
         const struct brw_tracked_state *atom = atoms[i];

         if (check_state(state, &atom->dirty)) {
            if (atom->emit) {
               ret = atom->emit( brw );
               if (ret)
                  return ret;
            }
         }
      }
   }

   if (BRW_DEBUG & DEBUG_STATE) {
      brw_update_dirty_counts( state->mesa,
                               state->brw,
                               state->cache );
   }

   /* Clear dirty flags:
    */
   memset(state, 0, sizeof(*state));
   return PIPE_OK;
}
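
/* Typical usage from the draw path, for illustration (a sketch; the
 * actual call site is in the draw code, e.g. brw_draw.c):
 *
 *    enum pipe_error ret;
 *
 *    ret = brw_validate_state(brw);       prepare stage
 *    if (ret != 0)
 *       return ret;
 *
 *    ret = brw_upload_state(brw);         emit stage
 *    if (ret != 0)
 *       return ret;
 *
 *    ... then emit the primitive commands ...
 */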