i965: Remove unnecessary header.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_state_upload.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33
34 #include "brw_context.h"
35 #include "brw_state.h"
36 #include "intel_batchbuffer.h"
37 #include "intel_buffers.h"
38
39 /* This is used to initialize brw->state.atoms[]. We could use this
40 * list directly except for a single atom, brw_constant_buffer, which
41 * has a .dirty value which changes according to the parameters of the
42 * current fragment and vertex programs, and so cannot be a static
43 * value.
44 */
static const struct brw_tracked_state *gen4_atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_unit,

   &brw_vs_constants, /* Before vs_surfaces and constant_buffer */
   &brw_wm_constants, /* Before wm_surfaces and constant_buffer */

   &brw_vs_surfaces,		/* must do before unit */
   &brw_wm_constant_surface,	/* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,		/* must do before samplers and unit */
   &brw_wm_binding_table,
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,		/* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   /* Kept last: its .dirty value is recomputed per-program (see the
    * comment above this table), so it must run after the program atoms.
    */
   &brw_constant_buffer
};
106
107 const struct brw_tracked_state *gen6_atoms[] =
108 {
109 &brw_check_fallback,
110
111 &brw_wm_input_sizes,
112 &brw_vs_prog,
113 &brw_gs_prog,
114 &brw_wm_prog,
115
116 &gen6_clip_vp,
117 &gen6_sf_vp,
118
119 /* Command packets: */
120 &brw_invarient_state,
121
122 &gen6_viewport_state, /* must do after *_vp stages */
123
124 &gen6_urb,
125 &gen6_blend_state, /* must do before cc unit */
126 &gen6_color_calc_state, /* must do before cc unit */
127 &gen6_depth_stencil_state, /* must do before cc unit */
128 &gen6_cc_state_pointers,
129
130 &brw_vs_constants, /* Before vs_surfaces and constant_buffer */
131 &gen6_wm_constants, /* Before wm_surfaces and constant_buffer */
132
133 &brw_vs_surfaces, /* must do before unit */
134 &brw_wm_constant_surface, /* must do before wm surfaces/bind bo */
135 &brw_wm_surfaces, /* must do before samplers and unit */
136 &brw_wm_binding_table,
137
138 &brw_wm_samplers,
139 &gen6_sampler_state,
140
141 &gen6_vs_state,
142 &gen6_gs_state,
143 &gen6_clip_state,
144 &gen6_sf_state,
145 &gen6_wm_state,
146
147 &gen6_scissor_state,
148
149 &brw_state_base_address,
150
151 &gen6_binding_table_pointers,
152
153 &brw_depthbuffer,
154
155 &brw_polygon_stipple,
156 &brw_polygon_stipple_offset,
157
158 &brw_line_stipple,
159 &brw_aa_line_parameters,
160
161 &brw_drawing_rect,
162
163 &brw_indices,
164 &brw_index_buffer,
165 &brw_vertices,
166 };
167
/* Per-context initialization of the brw state-tracking machinery.
 * Currently this only sets up the program/state caches.
 */
void brw_init_state( struct brw_context *brw )
{
   brw_init_caches(brw);
}
172
173
/* Tear down the state-tracking data for a context: the state caches
 * and the cached-batch data.
 */
void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
   brw_destroy_batch_cache(brw);
}
179
180 /***********************************************************************
181 */
182
183 static GLboolean check_state( const struct brw_state_flags *a,
184 const struct brw_state_flags *b )
185 {
186 return ((a->mesa & b->mesa) ||
187 (a->brw & b->brw) ||
188 (a->cache & b->cache));
189 }
190
191 static void accumulate_state( struct brw_state_flags *a,
192 const struct brw_state_flags *b )
193 {
194 a->mesa |= b->mesa;
195 a->brw |= b->brw;
196 a->cache |= b->cache;
197 }
198
199
200 static void xor_states( struct brw_state_flags *result,
201 const struct brw_state_flags *a,
202 const struct brw_state_flags *b )
203 {
204 result->mesa = a->mesa ^ b->mesa;
205 result->brw = a->brw ^ b->brw;
206 result->cache = a->cache ^ b->cache;
207 }
208
209 void
210 brw_clear_validated_bos(struct brw_context *brw)
211 {
212 int i;
213
214 /* Clear the last round of validated bos */
215 for (i = 0; i < brw->state.validated_bo_count; i++) {
216 drm_intel_bo_unreference(brw->state.validated_bos[i]);
217 brw->state.validated_bos[i] = NULL;
218 }
219 brw->state.validated_bo_count = 0;
220 }
221
/* One row of a dirty-bit histogram: a flag bit, its symbolic name, and
 * how many times it has been observed set (DEBUG_STATE statistics).
 */
struct dirty_bit_map {
   uint32_t bit;
   const char *name;   /* points at a string literal — never written, so const */
   uint32_t count;
};

/* Build a table row from a flag macro, stringizing the macro name. */
#define DEFINE_BIT(name) {name, #name, 0}
229
/* Name table for the core-Mesa _NEW_* dirty bits, used to accumulate and
 * print per-bit statistics when DEBUG_STATE is set (see brw_upload_state).
 * A zero "bit" entry terminates the scan in brw_update_dirty_count() and
 * brw_print_dirty_count().
 */
static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR_MATRIX),
   DEFINE_BIT(_NEW_ACCUM),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_PACKUNPACK),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   {0, 0, 0} /* terminator */
};
262
/* Name table for the driver-internal BRW_NEW_* dirty bits; same
 * DEBUG_STATE statistics use and zero-terminated like mesa_bits.
 */
static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_WM_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_WM_SURFACES),
   DEFINE_BIT(BRW_NEW_BINDING_TABLE),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_DEPTH_BUFFER),
   {0, 0, 0} /* terminator */
};
283
/* Name table for the CACHE_NEW_* dirty bits; same DEBUG_STATE
 * statistics use and zero-terminated like mesa_bits.
 */
static struct dirty_bit_map cache_bits[] = {
   DEFINE_BIT(CACHE_NEW_BLEND_STATE),
   DEFINE_BIT(CACHE_NEW_CC_VP),
   DEFINE_BIT(CACHE_NEW_CC_UNIT),
   DEFINE_BIT(CACHE_NEW_WM_PROG),
   DEFINE_BIT(CACHE_NEW_SAMPLER_DEFAULT_COLOR),
   DEFINE_BIT(CACHE_NEW_SAMPLER),
   DEFINE_BIT(CACHE_NEW_WM_UNIT),
   DEFINE_BIT(CACHE_NEW_SF_PROG),
   DEFINE_BIT(CACHE_NEW_SF_VP),
   DEFINE_BIT(CACHE_NEW_SF_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_PROG),
   DEFINE_BIT(CACHE_NEW_GS_UNIT),
   DEFINE_BIT(CACHE_NEW_GS_PROG),
   DEFINE_BIT(CACHE_NEW_CLIP_VP),
   DEFINE_BIT(CACHE_NEW_CLIP_UNIT),
   DEFINE_BIT(CACHE_NEW_CLIP_PROG),
   {0, 0, 0} /* terminator */
};
304
305
306 static void
307 brw_update_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
308 {
309 int i;
310
311 for (i = 0; i < 32; i++) {
312 if (bit_map[i].bit == 0)
313 return;
314
315 if (bit_map[i].bit & bits)
316 bit_map[i].count++;
317 }
318 }
319
320 static void
321 brw_print_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
322 {
323 int i;
324
325 for (i = 0; i < 32; i++) {
326 if (bit_map[i].bit == 0)
327 return;
328
329 fprintf(stderr, "0x%08x: %12d (%s)\n",
330 bit_map[i].bit, bit_map[i].count, bit_map[i].name);
331 }
332 }
333
/***********************************************************************
 * Prepare pass: accumulate this frame's dirty flags and run each state
 * atom's ->prepare() hook.  Actual command emission happens afterwards
 * in brw_upload_state().
 */
void brw_validate_state( struct brw_context *brw )
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   brw_clear_validated_bos(brw);

   /* Fold core Mesa's accumulated dirty bits into our own tracker. */
   state->mesa |= brw->intel.NewGLState;
   brw->intel.NewGLState = 0;

   brw_add_validated_bo(brw, intel->batch->buf);

   /* Sandybridge (gen >= 6) uses a different atom list than gen4/5. */
   if (intel->gen >= 6) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   /* Debug mode: force every atom to run by marking everything dirty. */
   if (brw->emit_state_always) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   /* Track fragment/vertex program changes ourselves, since the atoms
    * key their .dirty checks off these BRW_NEW_* bits.
    */
   if (brw->fragment_program != ctx->FragmentProgram._Current) {
      brw->fragment_program = ctx->FragmentProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
   }

   if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
   }

   /* Nothing dirty at all: skip the prepare pass entirely. */
   if (state->mesa == 0 &&
       state->cache == 0 &&
       state->brw == 0)
      return;

   if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
      brw_clear_batch_cache(brw);

   brw->intel.Fallback = GL_FALSE; /* boolean, not bitfield */

   /* do prepare stage for all atoms; stop early if an atom requested
    * software fallback.
    */
   for (i = 0; i < num_atoms; i++) {
      const struct brw_tracked_state *atom = atoms[i];

      if (brw->intel.Fallback)
	 break;

      if (check_state(state, &atom->dirty)) {
	 if (atom->prepare) {
	    atom->prepare(brw);
	 }
      }
   }

   intel_check_front_buffer_rendering(intel);

   /* Make sure that the textures which are referenced by the current
    * brw fragment program are actually present/valid.
    * If this fails, we can experience GPU lock-ups.
    */
   {
      const struct brw_fragment_program *fp;
      fp = brw_fragment_program_const(brw->fragment_program);
      if (fp) {
	 assert((fp->tex_units_used & ctx->Texture._EnabledUnits)
		== fp->tex_units_used);
      }
   }
}
416
417
/* Emit pass: run the ->emit() hook of every state atom whose dirty
 * flags intersect the accumulated dirty state, then clear the flags.
 * Must follow brw_validate_state(), which ran the ->prepare() hooks.
 */
void brw_upload_state(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   int i;
   static int dirty_count = 0;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   /* Same per-generation atom-list selection as brw_validate_state(). */
   if (intel->gen >= 6) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   brw_clear_validated_bos(brw);

   if (INTEL_DEBUG) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];
	 struct brw_state_flags generated;

	 /* An atom with no dirty bits at all would never run. */
	 assert(atom->dirty.mesa ||
		atom->dirty.brw ||
		atom->dirty.cache);

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }

	 accumulate_state(&examined, &atom->dirty);

	 /* generated = (prev ^ state)
	  * if (examined & generated)
	  *     fail;
	  * i.e. no atom may set a dirty bit that an earlier atom in the
	  * list already checked — that would be an ordering bug.
	  */
	 xor_states(&generated, &prev, state);
	 assert(!check_state(&examined, &generated));
	 prev = *state;
      }
   }
   else {
      /* Fast path: just emit every atom whose dirty bits match. */
      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }
      }
   }

   /* Under DEBUG_STATE, keep per-bit statistics and dump them to stderr
    * every 1000 uploads.
    */
   if (INTEL_DEBUG & DEBUG_STATE) {
      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      brw_update_dirty_count(cache_bits, state->cache);
      if (dirty_count++ % 1000 == 0) {
	 brw_print_dirty_count(mesa_bits, state->mesa);
	 brw_print_dirty_count(brw_bits, state->brw);
	 brw_print_dirty_count(cache_bits, state->cache);
	 fprintf(stderr, "\n");
      }
   }

   /* On fallback the flags are left set so the state is re-examined on
    * the next upload; otherwise start the next frame clean.
    */
   if (!brw->intel.Fallback)
      memset(state, 0, sizeof(*state));
}