/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */


#include "brw_context.h"
#include "brw_state.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"

/* This is used to initialize brw->state.atoms[].  We could use this
 * list directly except for a single atom, brw_constant_buffer, whose
 * .dirty value changes according to the parameters of the current
 * fragment and vertex programs, and so cannot be a static value.
 */
const struct brw_tracked_state *atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_vp,
   &brw_cc_unit,

   &brw_vs_surfaces,          /* must do before unit */
   &brw_wm_constant_surface,  /* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,          /* must do before samplers and unit */
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,              /* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   &brw_constant_buffer
};
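
/* Each atom pairs a static set of dirty flags with optional prepare
 * and emit callbacks, matching how the validate/upload passes below
 * use them.  As a minimal sketch (field names follow this file's
 * usage of atom->dirty/prepare/emit; the atom name and the two
 * callbacks here are hypothetical, not part of the driver):
 *
 *    const struct brw_tracked_state brw_example_atom = {
 *       .dirty = {
 *          .mesa  = _NEW_COLOR,        // core Mesa state flag
 *          .brw   = BRW_NEW_CONTEXT,   // driver-internal flag
 *          .cache = 0                  // no cached-object dependency
 *       },
 *       .prepare = brw_example_prepare,
 *       .emit = brw_example_emit
 *    };
 */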


void brw_init_state( struct brw_context *brw )
{
   brw_init_caches(brw);
}


void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
   brw_destroy_batch_cache(brw);
}

/***********************************************************************
 */

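/* Check whether the dirty flag sets "a" and "b" intersect in any of
 * the mesa, brw, or cache fields.
 */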
static GLboolean check_state( const struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   return ((a->mesa & b->mesa) ||
           (a->brw & b->brw) ||
           (a->cache & b->cache));
}

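/* Accumulate the dirty flags from "b" into "a".
 */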
static void accumulate_state( struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   a->mesa |= b->mesa;
   a->brw |= b->brw;
   a->cache |= b->cache;
}


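/* Compute the symmetric difference of two dirty flag sets, i.e. the
 * flags that differ between "a" and "b".
 */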
static void xor_states( struct brw_state_flags *result,
                        const struct brw_state_flags *a,
                        const struct brw_state_flags *b )
{
   result->mesa = a->mesa ^ b->mesa;
   result->brw = a->brw ^ b->brw;
   result->cache = a->cache ^ b->cache;
}

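/* Drop the references taken via brw_add_validated_bo() during the
 * previous validation round.
 */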
static void
brw_clear_validated_bos(struct brw_context *brw)
{
   int i;

   /* Clear the last round of validated bos */
   for (i = 0; i < brw->state.validated_bo_count; i++) {
      dri_bo_unreference(brw->state.validated_bos[i]);
      brw->state.validated_bos[i] = NULL;
   }
   brw->state.validated_bo_count = 0;
}

struct dirty_bit_map {
   uint32_t bit;
   const char *name;
   uint32_t count;
};

#define DEFINE_BIT(name) {name, #name, 0}
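/* For example, DEFINE_BIT(_NEW_MODELVIEW) expands to
 * { _NEW_MODELVIEW, "_NEW_MODELVIEW", 0 }; the preprocessor's
 * stringizing operator records each flag's name for the DEBUG_STATE
 * output below.
 */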

static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR_MATRIX),
   DEFINE_BIT(_NEW_ACCUM),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_PACKUNPACK),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   {0, 0, 0}
};

static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_WM_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_FENCE),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_DEPTH_BUFFER),
   {0, 0, 0}
};

static struct dirty_bit_map cache_bits[] = {
   DEFINE_BIT(CACHE_NEW_CC_VP),
   DEFINE_BIT(CACHE_NEW_CC_UNIT),
   DEFINE_BIT(CACHE_NEW_WM_PROG),
   DEFINE_BIT(CACHE_NEW_SAMPLER_DEFAULT_COLOR),
   DEFINE_BIT(CACHE_NEW_SAMPLER),
   DEFINE_BIT(CACHE_NEW_WM_UNIT),
   DEFINE_BIT(CACHE_NEW_SF_PROG),
   DEFINE_BIT(CACHE_NEW_SF_VP),
   DEFINE_BIT(CACHE_NEW_SF_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_PROG),
   DEFINE_BIT(CACHE_NEW_GS_UNIT),
   DEFINE_BIT(CACHE_NEW_GS_PROG),
   DEFINE_BIT(CACHE_NEW_CLIP_VP),
   DEFINE_BIT(CACHE_NEW_CLIP_UNIT),
   DEFINE_BIT(CACHE_NEW_CLIP_PROG),
   DEFINE_BIT(CACHE_NEW_SURFACE),
   DEFINE_BIT(CACHE_NEW_SURF_BIND),
   {0, 0, 0}
};


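/* Tally which flags in "bits" are set, walking the zero-terminated
 * bit_map table; used to gather statistics for DEBUG_STATE output.
 */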
static void
brw_update_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
{
   int i;

   for (i = 0; i < 32; i++) {
      if (bit_map[i].bit == 0)
         return;

      if (bit_map[i].bit & bits)
         bit_map[i].count++;
   }
}

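/* Print the accumulated count for each flag in the table.  (The
 * "bits" argument is currently unused.)
 */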
static void
brw_print_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
{
   int i;

   for (i = 0; i < 32; i++) {
      if (bit_map[i].bit == 0)
         return;

      fprintf(stderr, "0x%08x: %12d (%s)\n",
              bit_map[i].bit, bit_map[i].count, bit_map[i].name);
   }
}

/***********************************************************************
 * Prepare pass: work out which atoms are dirty and run their
 * prepare() hooks, accumulating the buffer objects the batch will
 * reference.
 */
void brw_validate_state( struct brw_context *brw )
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;

   brw_clear_validated_bos(brw);

   state->mesa |= brw->intel.NewGLState;
   brw->intel.NewGLState = 0;

   brw_add_validated_bo(brw, intel->batch->buf);

   if (brw->emit_state_always) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   if (brw->fragment_program != ctx->FragmentProgram._Current) {
      brw->fragment_program = ctx->FragmentProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
   }

   if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
   }

   if (state->mesa == 0 &&
       state->cache == 0 &&
       state->brw == 0)
      return;

   if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
      brw_clear_batch_cache(brw);

   brw->intel.Fallback = GL_FALSE; /* boolean, not bitfield */

   /* do prepare stage for all atoms */
   for (i = 0; i < Elements(atoms); i++) {
      const struct brw_tracked_state *atom = atoms[i];

      if (brw->intel.Fallback)
         break;

      if (check_state(state, &atom->dirty)) {
         if (atom->prepare) {
            atom->prepare(brw);
         }
      }
   }

   intel_check_front_buffer_rendering(intel);

   /* Make sure that the textures which are referenced by the current
    * brw fragment program are actually present/valid.
    * If this fails, we can experience GPU lock-ups.
    */
   {
      const struct brw_fragment_program *fp;
      fp = brw_fragment_program_const(brw->fragment_program);
      if (fp) {
         assert((fp->tex_units_used & ctx->Texture._EnabledUnits)
                == fp->tex_units_used);
      }
   }
}


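/* Emit pass: walk the atom list again and call emit() for every atom
 * whose dirty set intersects the current dirty flags.  Under
 * INTEL_DEBUG this also cross-checks that no atom dirties state an
 * earlier atom has already consumed, which would mean the list is
 * ordered incorrectly.
 */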
void brw_upload_state(struct brw_context *brw)
{
   struct brw_state_flags *state = &brw->state.dirty;
   int i;
   static int dirty_count = 0;

   brw_clear_validated_bos(brw);

   if (INTEL_DEBUG) {
      /* Debug version which enforces various sanity checks on the
       * state flags generated and consumed, to help ensure the state
       * atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      _mesa_memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < Elements(atoms); i++) {
         const struct brw_tracked_state *atom = atoms[i];
         struct brw_state_flags generated;

         assert(atom->dirty.mesa ||
                atom->dirty.brw ||
                atom->dirty.cache);

         if (brw->intel.Fallback)
            break;

         if (check_state(state, &atom->dirty)) {
            if (atom->emit) {
               atom->emit( brw );
            }
         }

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ state)
          * if (examined & generated)
          *     fail;
          */
         xor_states(&generated, &prev, state);
         assert(!check_state(&examined, &generated));
         prev = *state;
      }
   }
   else {
      for (i = 0; i < Elements(atoms); i++) {
         const struct brw_tracked_state *atom = atoms[i];

         if (brw->intel.Fallback)
            break;

         if (check_state(state, &atom->dirty)) {
            if (atom->emit) {
               atom->emit( brw );
            }
         }
      }
   }

   if (INTEL_DEBUG & DEBUG_STATE) {
      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      brw_update_dirty_count(cache_bits, state->cache);
      if (dirty_count++ % 1000 == 0) {
         brw_print_dirty_count(mesa_bits, state->mesa);
         brw_print_dirty_count(brw_bits, state->brw);
         brw_print_dirty_count(cache_bits, state->cache);
         fprintf(stderr, "\n");
      }
   }

   if (!brw->intel.Fallback)
      memset(state, 0, sizeof(*state));
}