Merge remote branch 'origin/master' into pipe-video
[mesa.git] / src / mesa / drivers / dri / i965 / brw_state_upload.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33
34 #include "brw_context.h"
35 #include "brw_state.h"
36 #include "intel_batchbuffer.h"
37 #include "intel_buffers.h"
38
/* This is used to initialize brw->state.atoms[].  We could use this
 * list directly except for a single atom, brw_constant_buffer, which
 * has a .dirty value which changes according to the parameters of the
 * current fragment and vertex programs, and so cannot be a static
 * value.
 *
 * The atoms are walked in array order by brw_validate_state() and
 * brw_upload_state(), so the ordering constraints noted in the
 * comments below must be preserved when editing this list.
 */
static const struct brw_tracked_state *gen4_atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_unit,

   &brw_vs_constants, /* Before vs_surfaces and constant_buffer */
   &brw_wm_constants, /* Before wm_surfaces and constant_buffer */

   &brw_vs_surfaces,		/* must do before unit */
   &brw_wm_constant_surface,	/* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,		/* must do before samplers and unit */
   &brw_wm_binding_table,
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,		/* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   /* Last: its .dirty value is computed per-program, see the comment
    * at the top of this list.
    */
   &brw_constant_buffer
};
106
/* State atom list used for Gen6 (Sandybridge) instead of gen4_atoms.
 * Walked in array order, so the ordering comments below are binding.
 *
 * NOTE(review): unlike gen4_atoms this array is not declared static —
 * presumably so another file can reference it; confirm, and make it
 * static const if nothing external uses it.
 */
const struct brw_tracked_state *gen6_atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_wm_prog,

   &gen6_clip_vp,
   &gen6_sf_vp,

   /* Command packets: */
   &brw_invarient_state,

   &gen6_viewport_state,	/* must do after *_vp stages */

   &gen6_urb,
   &gen6_blend_state,		/* must do before cc unit */
   &gen6_color_calc_state,	/* must do before cc unit */
   &gen6_depth_stencil_state,	/* must do before cc unit */
   &gen6_cc_state_pointers,

   &brw_vs_constants, /* Before vs_surfaces and constant_buffer */
   &brw_wm_constants, /* Before wm_surfaces and constant_buffer */
   &gen6_wm_constants, /* Before wm_surfaces and constant_buffer */

   &brw_vs_surfaces,		/* must do before unit */
   &brw_wm_constant_surface,	/* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,		/* must do before samplers and unit */
   &brw_wm_binding_table,

   &brw_wm_samplers,
   &gen6_sampler_state,

   &gen6_vs_state,
   &gen6_gs_state,
   &gen6_clip_state,
   &gen6_sf_state,
   &gen6_wm_state,

   &gen6_scissor_state,
   &gen6_scissor_state_pointers,

   &brw_state_base_address,

   &gen6_binding_table_pointers,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_drawing_rect,

   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,
};
169
/**
 * One-time per-context state setup: initializes the program/state
 * caches that the state atoms store their results in.
 */
void brw_init_state( struct brw_context *brw )
{
   brw_init_caches(brw);
}
174
175
/**
 * Context teardown counterpart of brw_init_state(): frees the state
 * caches and then the batch cache.
 */
void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
   brw_destroy_batch_cache(brw);
}
181
182 /***********************************************************************
183 */
184
185 static GLboolean check_state( const struct brw_state_flags *a,
186 const struct brw_state_flags *b )
187 {
188 return ((a->mesa & b->mesa) ||
189 (a->brw & b->brw) ||
190 (a->cache & b->cache));
191 }
192
193 static void accumulate_state( struct brw_state_flags *a,
194 const struct brw_state_flags *b )
195 {
196 a->mesa |= b->mesa;
197 a->brw |= b->brw;
198 a->cache |= b->cache;
199 }
200
201
/**
 * Store into \p result the flags that differ between \p a and \p b
 * (per-field bitwise XOR).  Used by the debug loop in
 * brw_upload_state() to detect which flags an atom newly generated.
 */
static void xor_states( struct brw_state_flags *result,
			const struct brw_state_flags *a,
			const struct brw_state_flags *b )
{
   result->mesa = a->mesa ^ b->mesa;
   result->brw = a->brw ^ b->brw;
   result->cache = a->cache ^ b->cache;
}
210
211 void
212 brw_clear_validated_bos(struct brw_context *brw)
213 {
214 int i;
215
216 /* Clear the last round of validated bos */
217 for (i = 0; i < brw->state.validated_bo_count; i++) {
218 drm_intel_bo_unreference(brw->state.validated_bos[i]);
219 brw->state.validated_bos[i] = NULL;
220 }
221 brw->state.validated_bo_count = 0;
222 }
223
/**
 * Maps a single dirty-flag bit to its symbolic name, plus a counter of
 * how many times that bit has been seen set.  Used only by the
 * DEBUG_STATE statistics code below.
 */
struct dirty_bit_map {
   uint32_t bit;	/* the dirty flag (a single bit) */
   const char *name;	/* stringified flag name; always points at a
			 * string literal, so it must be const-qualified
			 * (writing through it would be undefined). */
   uint32_t count;	/* number of times the bit was seen dirty */
};

/* Build a table entry from a flag macro: {value, "NAME", 0}. */
#define DEFINE_BIT(name) {name, #name, 0}
231
/* Per-bit statistics table for the core Mesa _NEW_* dirty flags.
 * The {0, 0, 0} entry terminates the table; .count fields are mutated
 * by brw_update_dirty_count(), so the array cannot be const.
 */
static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_ACCUM),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_PACKUNPACK),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   {0, 0, 0}
};
263
/* Per-bit statistics table for the driver-internal BRW_NEW_* dirty
 * flags.  Terminated by the {0, 0, 0} entry.
 */
static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_WM_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_WM_SURFACES),
   DEFINE_BIT(BRW_NEW_BINDING_TABLE),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_DEPTH_BUFFER),
   {0, 0, 0}
};
284
/* Per-bit statistics table for the CACHE_NEW_* dirty flags (state
 * cache results).  Terminated by the {0, 0, 0} entry.
 */
static struct dirty_bit_map cache_bits[] = {
   DEFINE_BIT(CACHE_NEW_BLEND_STATE),
   DEFINE_BIT(CACHE_NEW_CC_VP),
   DEFINE_BIT(CACHE_NEW_CC_UNIT),
   DEFINE_BIT(CACHE_NEW_WM_PROG),
   DEFINE_BIT(CACHE_NEW_SAMPLER_DEFAULT_COLOR),
   DEFINE_BIT(CACHE_NEW_SAMPLER),
   DEFINE_BIT(CACHE_NEW_WM_UNIT),
   DEFINE_BIT(CACHE_NEW_SF_PROG),
   DEFINE_BIT(CACHE_NEW_SF_VP),
   DEFINE_BIT(CACHE_NEW_SF_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_PROG),
   DEFINE_BIT(CACHE_NEW_GS_UNIT),
   DEFINE_BIT(CACHE_NEW_GS_PROG),
   DEFINE_BIT(CACHE_NEW_CLIP_VP),
   DEFINE_BIT(CACHE_NEW_CLIP_UNIT),
   DEFINE_BIT(CACHE_NEW_CLIP_PROG),
   {0, 0, 0}
};
305
306
307 static void
308 brw_update_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
309 {
310 int i;
311
312 for (i = 0; i < 32; i++) {
313 if (bit_map[i].bit == 0)
314 return;
315
316 if (bit_map[i].bit & bits)
317 bit_map[i].count++;
318 }
319 }
320
321 static void
322 brw_print_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
323 {
324 int i;
325
326 for (i = 0; i < 32; i++) {
327 if (bit_map[i].bit == 0)
328 return;
329
330 fprintf(stderr, "0x%08x: %12d (%s)\n",
331 bit_map[i].bit, bit_map[i].count, bit_map[i].name);
332 }
333 }
334
/***********************************************************************
 * Emit all state:
 *
 * Runs the "prepare" phase over the active atom list.  Accumulates the
 * pending dirty flags, picks the gen4 or gen6 atom list, detects
 * program changes, and calls each atom's prepare() hook whose dirty
 * mask intersects the current flags.  Bails out early if nothing is
 * dirty, or stops mid-loop if an atom sets the Fallback flag.
 */
void brw_validate_state( struct brw_context *brw )
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   brw_clear_validated_bos(brw);

   /* Fold core-Mesa dirty state into our flags and consume it. */
   state->mesa |= brw->intel.NewGLState;
   brw->intel.NewGLState = 0;

   brw_add_validated_bo(brw, intel->batch->buf);

   if (intel->gen >= 6) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   /* Debug/robustness switch: force every atom to run. */
   if (brw->emit_state_always) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   if (brw->fragment_program != ctx->FragmentProgram._Current) {
      brw->fragment_program = ctx->FragmentProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
   }

   if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
   }

   /* Nothing dirty at all: nothing to prepare. */
   if (state->mesa == 0 &&
       state->cache == 0 &&
       state->brw == 0)
      return;

   if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
      brw_clear_batch_cache(brw);

   brw->intel.Fallback = GL_FALSE; /* boolean, not bitfield */

   /* do prepare stage for all atoms */
   for (i = 0; i < num_atoms; i++) {
      const struct brw_tracked_state *atom = atoms[i];

      /* An atom may trigger software fallback; stop preparing if so. */
      if (brw->intel.Fallback)
	 break;

      if (check_state(state, &atom->dirty)) {
	 if (atom->prepare) {
	    atom->prepare(brw);
	 }
      }
   }

   intel_check_front_buffer_rendering(intel);

   /* Make sure that the textures which are referenced by the current
    * brw fragment program are actually present/valid.
    * If this fails, we can experience GPU lock-ups.
    */
   {
      const struct brw_fragment_program *fp;
      fp = brw_fragment_program_const(brw->fragment_program);
      if (fp) {
	 assert((fp->tex_units_used & ctx->Texture._EnabledUnits)
		== fp->tex_units_used);
      }
   }
}
417
418
/**
 * Emit phase: runs each dirty atom's emit() hook, in atom-list order.
 *
 * Must follow brw_validate_state() (which ran the prepare hooks and
 * left brw->state.dirty set).  With INTEL_DEBUG set, a checking
 * variant of the loop asserts that no atom dirties state that an
 * earlier atom already examined — i.e. that the list ordering is
 * valid.  Dirty flags are cleared at the end unless a fallback was
 * triggered, in which case they are kept so the software path can
 * re-examine them.
 */
void brw_upload_state(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   int i;
   static int dirty_count = 0;	/* counts uploads, for periodic stats dump */
   const struct brw_tracked_state **atoms;
   int num_atoms;

   if (intel->gen >= 6) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   brw_clear_validated_bos(brw);

   if (unlikely(INTEL_DEBUG)) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];
	 struct brw_state_flags generated;

	 /* Every atom must subscribe to at least one dirty flag. */
	 assert(atom->dirty.mesa ||
		atom->dirty.brw ||
		atom->dirty.cache);

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }

	 accumulate_state(&examined, &atom->dirty);

	 /* generated = (prev ^ state)
	  * if (examined & generated)
	  *     fail;
	  */
	 xor_states(&generated, &prev, state);
	 assert(!check_state(&examined, &generated));
	 prev = *state;
      }
   }
   else {
      /* Fast path: just emit each atom whose dirty mask intersects
       * the current flags.
       */
      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }
      }
   }

   if (unlikely(INTEL_DEBUG & DEBUG_STATE)) {
      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      brw_update_dirty_count(cache_bits, state->cache);
      /* Dump accumulated statistics every 1000 uploads. */
      if (dirty_count++ % 1000 == 0) {
	 brw_print_dirty_count(mesa_bits, state->mesa);
	 brw_print_dirty_count(brw_bits, state->brw);
	 brw_print_dirty_count(cache_bits, state->cache);
	 fprintf(stderr, "\n");
      }
   }

   /* Keep the dirty flags on fallback so the swrast path can see them. */
   if (!brw->intel.Fallback)
      memset(state, 0, sizeof(*state));
}