/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include "brw_context.h"
#include "brw_state.h"
#include "intel_batchbuffer.h"

/* This is used to initialize brw->state.atoms[].  We could use this
 * list directly except for a single atom, brw_constant_buffer, which
 * has a .dirty value which changes according to the parameters of the
 * current fragment and vertex programs, and so cannot be a static
 * value.
 */
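/* Each atom advertises the state it depends on in its .dirty mask
 * (mesa/brw/cache words, tested by check_state() below).  When any of
 * those flags fire, the atom's prepare() hook runs during
 * brw_validate_state() and its emit() hook during brw_upload_state().
 */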
const struct brw_tracked_state *atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large the urb entries
    * need to be and can decide whether the urb layout needs to change.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_vp,
   &brw_cc_unit,

   &brw_wm_surfaces,		/* must do before samplers */
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,		/* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   /* Ordering of the commands below is documented as fixed.
    */
#if 0
   &brw_pipelined_state_pointers,
   &brw_urb_fence,
   &brw_constant_buffer_state,
#else
   &brw_psp_urb_cbs,
#endif

   &brw_drawing_rect,
   &brw_indices,
   &brw_vertices,

   NULL,			/* brw_constant_buffer */
};
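/* Copy the static atom list into brw->state.atoms[], then patch the
 * NULL placeholder to point at brw->curbe.tracked_state, a per-context
 * copy of brw_constant_buffer whose .dirty flags change at runtime.
 */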
void brw_init_state( struct brw_context *brw )
{
   GLuint i;

   brw_init_cache(brw);

   brw->state.atoms = _mesa_malloc(sizeof(atoms));
   brw->state.nr_atoms = sizeof(atoms)/sizeof(*atoms);
   _mesa_memcpy(brw->state.atoms, atoms, sizeof(atoms));

   /* Patch in a pointer to the dynamic state atom:
    */
   for (i = 0; i < brw->state.nr_atoms; i++)
      if (brw->state.atoms[i] == NULL)
         brw->state.atoms[i] = &brw->curbe.tracked_state;

   _mesa_memcpy(&brw->curbe.tracked_state,
                &brw_constant_buffer,
                sizeof(brw_constant_buffer));
}
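/* Free the atom list and tear down the program and batch caches.
 */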
void brw_destroy_state( struct brw_context *brw )
{
   if (brw->state.atoms) {
      _mesa_free(brw->state.atoms);
      brw->state.atoms = NULL;
   }

   brw_destroy_cache(brw);
   brw_destroy_batch_cache(brw);
}

/***********************************************************************
 * Helpers for combining and comparing brw_state_flags bitmasks.
 */
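/* Return true if any dirty bit in "a" is also set in "b".
 */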
static GLboolean check_state( const struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   return ((a->mesa & b->mesa) ||
           (a->brw & b->brw) ||
           (a->cache & b->cache));
}
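/* OR b's dirty flags into a.
 */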
static void accumulate_state( struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   a->mesa |= b->mesa;
   a->brw |= b->brw;
   a->cache |= b->cache;
}
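/* Compute the flags that differ between a and b.
 */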
static void xor_states( struct brw_state_flags *result,
                        const struct brw_state_flags *a,
                        const struct brw_state_flags *b )
{
   result->mesa = a->mesa ^ b->mesa;
   result->brw = a->brw ^ b->brw;
   result->cache = a->cache ^ b->cache;
}
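/* Drop the references taken on the buffer objects validated for the
 * previous draw, resetting the list for this one.
 */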
static void
brw_clear_validated_bos(struct brw_context *brw)
{
   int i;

   /* Clear the last round of validated bos */
   for (i = 0; i < brw->state.validated_bo_count; i++) {
      dri_bo_unreference(brw->state.validated_bos[i]);
      brw->state.validated_bos[i] = NULL;
   }
   brw->state.validated_bo_count = 0;
}
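/* Bookkeeping for the DEBUG_STATE statistics below: one counter per
 * dirty bit, plus the bit's name for printing.
 */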
struct dirty_bit_map {
   uint32_t bit;
   const char *name;
   uint32_t count;
};

#define DEFINE_BIT(name) {name, #name, 0}

static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR_MATRIX),
   DEFINE_BIT(_NEW_ACCUM),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_PACKUNPACK),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   {0, 0, 0}
};

static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_WM_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_INPUT_VARYING),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_FENCE),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_DEPTH_BUFFER),
   {0, 0, 0}
};

static struct dirty_bit_map cache_bits[] = {
   DEFINE_BIT(CACHE_NEW_CC_VP),
   DEFINE_BIT(CACHE_NEW_CC_UNIT),
   DEFINE_BIT(CACHE_NEW_WM_PROG),
   DEFINE_BIT(CACHE_NEW_SAMPLER_DEFAULT_COLOR),
   DEFINE_BIT(CACHE_NEW_SAMPLER),
   DEFINE_BIT(CACHE_NEW_WM_UNIT),
   DEFINE_BIT(CACHE_NEW_SF_PROG),
   DEFINE_BIT(CACHE_NEW_SF_VP),
   DEFINE_BIT(CACHE_NEW_SF_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_PROG),
   DEFINE_BIT(CACHE_NEW_GS_UNIT),
   DEFINE_BIT(CACHE_NEW_GS_PROG),
   DEFINE_BIT(CACHE_NEW_CLIP_VP),
   DEFINE_BIT(CACHE_NEW_CLIP_UNIT),
   DEFINE_BIT(CACHE_NEW_CLIP_PROG),
   DEFINE_BIT(CACHE_NEW_SURFACE),
   DEFINE_BIT(CACHE_NEW_SURF_BIND),
   {0, 0, 0}
};
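/* Count how many times each dirty bit in "bits" fired.  bit_map must
 * be terminated by a zero entry, as the tables above are.
 */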
static void
brw_update_dirty_count(struct dirty_bit_map *bit_map, uint32_t bits)
{
   int i;

   for (i = 0; i < 32; i++) {
      if (bit_map[i].bit == 0)
         return;

      if (bit_map[i].bit & bits)
         bit_map[i].count++;
   }
}
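/* Print the name and accumulated count of each dirty bit in bit_map.
 */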
static void
brw_print_dirty_count(struct dirty_bit_map *bit_map, uint32_t bits)
{
   int i;

   for (i = 0; i < 32; i++) {
      if (bit_map[i].bit == 0)
         return;

      fprintf(stderr, "0x%08x: %12d (%s)\n",
              bit_map[i].bit, bit_map[i].count, bit_map[i].name);
   }
}

/***********************************************************************
 * Prepare phase: accumulate this draw's dirty flags and run each
 * atom's prepare() hook so buffer objects are validated before any
 * state is emitted.
 */
void brw_validate_state( struct brw_context *brw )
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;

   brw_clear_validated_bos(brw);

   state->mesa |= brw->intel.NewGLState;
   brw->intel.NewGLState = 0;

   brw_add_validated_bo(brw, intel->batch->buf);

   if (brw->emit_state_always) {
      state->mesa |= ~0;
      state->brw |= ~0;
   }

   if (brw->fragment_program != ctx->FragmentProgram._Current) {
      brw->fragment_program = ctx->FragmentProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
   }

   if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
   }

   if (state->mesa == 0 &&
       state->cache == 0 &&
       state->brw == 0)
      return;

   if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
      brw_clear_batch_cache_flush(brw);

   brw->intel.Fallback = 0;

   /* do prepare stage for all atoms */
   for (i = 0; i < brw->state.nr_atoms; i++) {
      const struct brw_tracked_state *atom = brw->state.atoms[i];

      if (brw->intel.Fallback)
         break;

      if (check_state(state, &atom->dirty)) {
         if (atom->prepare) {
            atom->prepare(brw);
         }
      }
   }
}
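/* Emit phase: walk the atom list again and call emit() for every atom
 * whose dirty flags intersect the current dirty state, then clear the
 * dirty flags (unless a software fallback is active).
 */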
void brw_upload_state(struct brw_context *brw)
{
   struct brw_state_flags *state = &brw->state.dirty;
   int i;
   static int dirty_count = 0;

   brw_clear_validated_bos(brw);

   if (INTEL_DEBUG) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      _mesa_memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < brw->state.nr_atoms; i++) {
         const struct brw_tracked_state *atom = brw->state.atoms[i];
         struct brw_state_flags generated;

         assert(atom->dirty.mesa ||
                atom->dirty.brw ||
                atom->dirty.cache);

         if (brw->intel.Fallback)
            break;

         if (check_state(state, &atom->dirty)) {
            if (atom->emit) {
               atom->emit( brw );
            }
         }

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ state)
          * if (examined & generated)
          *     fail;
          */
         xor_states(&generated, &prev, state);
         assert(!check_state(&examined, &generated));
         prev = *state;
      }
   }
   else {
      for (i = 0; i < brw->state.nr_atoms; i++) {
         const struct brw_tracked_state *atom = brw->state.atoms[i];

         if (brw->intel.Fallback)
            break;

         if (check_state(state, &atom->dirty)) {
            if (atom->emit) {
               atom->emit( brw );
            }
         }
      }
   }

   if (INTEL_DEBUG & DEBUG_STATE) {
      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      brw_update_dirty_count(cache_bits, state->cache);
      if (dirty_count++ % 1000 == 0) {
         brw_print_dirty_count(mesa_bits, state->mesa);
         brw_print_dirty_count(brw_bits, state->brw);
         brw_print_dirty_count(cache_bits, state->cache);
         fprintf(stderr, "\n");
      }
   }

   if (!brw->intel.Fallback)
      memset(state, 0, sizeof(*state));
}