/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */


#include "brw_context.h"
#include "brw_state.h"
#include "intel_batchbuffer.h"

/* This is used to initialize brw->state.atoms[].  We could use this
 * list directly except for a single atom, brw_constant_buffer, which
 * has a .dirty value which changes according to the parameters of the
 * current fragment and vertex programs, and so cannot be a static
 * value.
 */
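/* The atoms run in the order they appear in this list.  An atom that
 * produces state must come before any later atom whose .dirty mask
 * examines that state; the debug path in brw_upload_state() asserts
 * this ordering.
 */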
const struct brw_tracked_state *atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_vp,
   &brw_cc_unit,

   &brw_vs_surfaces,          /* must do before unit */
   &brw_wm_constant_surface,  /* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,          /* must do before samplers and unit */
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,              /* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_vertices,

   &brw_constant_buffer
};


void brw_init_state( struct brw_context *brw )
{
   brw_init_caches(brw);
}


void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
   brw_destroy_batch_cache(brw);
}

/***********************************************************************
 */

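/* Return GL_TRUE if any of the dirty flags tracked by /b/ are currently
 * set in /a/.
 */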
static GLboolean check_state( const struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   return ((a->mesa & b->mesa) ||
           (a->brw & b->brw) ||
           (a->cache & b->cache));
}

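/* Accumulate /b/'s dirty flags into /a/. */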
static void accumulate_state( struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   a->mesa |= b->mesa;
   a->brw |= b->brw;
   a->cache |= b->cache;
}


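/* Compute the flags that differ between /a/ and /b/, i.e. the state
 * that changed between two snapshots of brw->state.dirty.
 */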
static void xor_states( struct brw_state_flags *result,
                        const struct brw_state_flags *a,
                        const struct brw_state_flags *b )
{
   result->mesa = a->mesa ^ b->mesa;
   result->brw = a->brw ^ b->brw;
   result->cache = a->cache ^ b->cache;
}

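/* Drop the references taken on buffer objects during the previous
 * validate/upload cycle.
 */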
static void
brw_clear_validated_bos(struct brw_context *brw)
{
   int i;

   /* Clear the last round of validated bos */
   for (i = 0; i < brw->state.validated_bo_count; i++) {
      dri_bo_unreference(brw->state.validated_bos[i]);
      brw->state.validated_bos[i] = NULL;
   }
   brw->state.validated_bo_count = 0;
}

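/* Bookkeeping for the DEBUG_STATE statistics below: each entry maps a
 * dirty bit to its name and a running count of how often that bit was
 * seen set.
 */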
struct dirty_bit_map {
   uint32_t bit;
   char *name;
   uint32_t count;
};

#define DEFINE_BIT(name) {name, #name, 0}

static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR_MATRIX),
   DEFINE_BIT(_NEW_ACCUM),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_PACKUNPACK),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   {0, 0, 0}
};

static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_WM_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_FENCE),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_DEPTH_BUFFER),
   {0, 0, 0}
};

static struct dirty_bit_map cache_bits[] = {
   DEFINE_BIT(CACHE_NEW_CC_VP),
   DEFINE_BIT(CACHE_NEW_CC_UNIT),
   DEFINE_BIT(CACHE_NEW_WM_PROG),
   DEFINE_BIT(CACHE_NEW_SAMPLER_DEFAULT_COLOR),
   DEFINE_BIT(CACHE_NEW_SAMPLER),
   DEFINE_BIT(CACHE_NEW_WM_UNIT),
   DEFINE_BIT(CACHE_NEW_SF_PROG),
   DEFINE_BIT(CACHE_NEW_SF_VP),
   DEFINE_BIT(CACHE_NEW_SF_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_PROG),
   DEFINE_BIT(CACHE_NEW_GS_UNIT),
   DEFINE_BIT(CACHE_NEW_GS_PROG),
   DEFINE_BIT(CACHE_NEW_CLIP_VP),
   DEFINE_BIT(CACHE_NEW_CLIP_UNIT),
   DEFINE_BIT(CACHE_NEW_CLIP_PROG),
   DEFINE_BIT(CACHE_NEW_SURFACE),
   DEFINE_BIT(CACHE_NEW_SURF_BIND),
   {0, 0, 0}
};


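/* Increment the counter of every entry whose bit is set in /bits/.
 * The maps above are terminated by an all-zero entry.
 */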
static void
brw_update_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
{
   int i;

   for (i = 0; i < 32; i++) {
      if (bit_map[i].bit == 0)
         return;

      if (bit_map[i].bit & bits)
         bit_map[i].count++;
   }
}

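/* Print the accumulated counts for each named dirty bit. */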
static void
brw_print_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
{
   int i;

   for (i = 0; i < 32; i++) {
      if (bit_map[i].bit == 0)
         return;

      fprintf(stderr, "0x%08x: %12d (%s)\n",
              bit_map[i].bit, bit_map[i].count, bit_map[i].name);
   }
}

/***********************************************************************
 * Validate and emit all state:
 *
 * brw_validate_state() runs the prepare() hook of every dirty atom;
 * brw_upload_state() then runs the emit() hooks and clears the dirty
 * flags.
 */
void brw_validate_state( struct brw_context *brw )
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;

   brw_clear_validated_bos(brw);

   state->mesa |= brw->intel.NewGLState;
   brw->intel.NewGLState = 0;

   brw_add_validated_bo(brw, intel->batch->buf);

   if (brw->emit_state_always) {
      state->mesa |= ~0;
      state->brw |= ~0;
   }

   if (brw->fragment_program != ctx->FragmentProgram._Current) {
      brw->fragment_program = ctx->FragmentProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
   }

   if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
   }

   if (state->mesa == 0 &&
       state->cache == 0 &&
       state->brw == 0)
      return;

   if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
      brw_clear_batch_cache_flush(brw);

   brw->intel.Fallback = 0;

   /* do prepare stage for all atoms */
   for (i = 0; i < Elements(atoms); i++) {
      const struct brw_tracked_state *atom = atoms[i];

      if (brw->intel.Fallback)
         break;

      if (check_state(state, &atom->dirty)) {
         if (atom->prepare) {
            atom->prepare(brw);
         }
      }
   }
}


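/* Run the emit() hook of every atom whose dirty flags are set, then
 * clear brw->state.dirty (unless a fallback is in effect).
 */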
void brw_upload_state(struct brw_context *brw)
{
   struct brw_state_flags *state = &brw->state.dirty;
   int i;
   static int dirty_count = 0;

   brw_clear_validated_bos(brw);

   if (INTEL_DEBUG) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      _mesa_memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < Elements(atoms); i++) {
         const struct brw_tracked_state *atom = atoms[i];
         struct brw_state_flags generated;

         assert(atom->dirty.mesa ||
                atom->dirty.brw ||
                atom->dirty.cache);

         if (brw->intel.Fallback)
            break;

         if (check_state(state, &atom->dirty)) {
            if (atom->emit) {
               atom->emit( brw );
            }
         }

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ state)
          * if (examined & generated)
          *     fail;
          *
          * In other words, an atom must not set dirty bits that an
          * atom earlier in the list has already examined during this
          * pass; that earlier atom would have missed the update.
          */
         xor_states(&generated, &prev, state);
         assert(!check_state(&examined, &generated));
         prev = *state;
      }
   }
   else {
      for (i = 0; i < Elements(atoms); i++) {
         const struct brw_tracked_state *atom = atoms[i];

         if (brw->intel.Fallback)
            break;

         if (check_state(state, &atom->dirty)) {
            if (atom->emit) {
               atom->emit( brw );
            }
         }
      }
   }

   if (INTEL_DEBUG & DEBUG_STATE) {
      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      brw_update_dirty_count(cache_bits, state->cache);
      if (dirty_count++ % 1000 == 0) {
         brw_print_dirty_count(mesa_bits, state->mesa);
         brw_print_dirty_count(brw_bits, state->brw);
         brw_print_dirty_count(cache_bits, state->cache);
         fprintf(stderr, "\n");
      }
   }

   if (!brw->intel.Fallback)
      memset(state, 0, sizeof(*state));
}