i965: Move the CC VP to state streaming.
[mesa.git] src/mesa/drivers/dri/i965/brw_state_upload.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include "brw_context.h"
#include "brw_state.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"

/* These atom lists are used to initialize brw->prepare_atoms[] and
 * brw->emit_atoms[].  We could use the lists directly except for a
 * single atom, brw_constant_buffer, whose .dirty value changes with
 * the parameters of the current fragment and vertex programs, and so
 * cannot be a static value.
 */
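/* A sketch of the atom interface these lists are built from (see
 * brw_state.h for the authoritative definition): each atom pairs the
 * dirty flags it listens to with optional prepare/emit hooks, roughly:
 *
 *    struct brw_tracked_state {
 *       struct brw_state_flags dirty;             // mesa/brw/cache bits
 *       void (*prepare)(struct brw_context *brw); // buffer validation
 *       void (*emit)(struct brw_context *brw);    // command/state emission
 *    };
 */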
static const struct brw_tracked_state *gen4_atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_vp,
   &brw_cc_unit,

   &brw_vs_constants,        /* Before vs_surfaces and constant_buffer */
   &brw_wm_constants,        /* Before wm_surfaces and constant_buffer */

   &brw_vs_surfaces,         /* must do before unit */
   &brw_wm_constant_surface, /* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,         /* must do before samplers and unit */
   &brw_wm_binding_table,
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,             /* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   &brw_constant_buffer
};
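/* Ordering within these lists matters: an atom must come after any
 * atom whose results it depends on (the inline comments above record
 * the known constraints), and the INTEL_DEBUG sanity pass in
 * brw_upload_state() asserts when an atom dirties flags that an
 * earlier atom already checked.
 */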

static const struct brw_tracked_state *gen6_atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_wm_prog,

   &gen6_clip_vp,
   &gen6_sf_vp,

   /* Command packets: */
   &brw_invarient_state,

   &brw_cc_vp,
   &gen6_viewport_state,      /* must do after *_vp stages */

   &gen6_urb,
   &gen6_blend_state,         /* must do before cc unit */
   &gen6_color_calc_state,    /* must do before cc unit */
   &gen6_depth_stencil_state, /* must do before cc unit */
   &gen6_cc_state_pointers,

   &brw_vs_constants,         /* Before vs_surfaces and constant_buffer */
   &brw_wm_constants,         /* Before wm_surfaces and constant_buffer */
   &gen6_vs_constants,        /* Before vs_state */
   &gen6_wm_constants,        /* Before wm_state */

   &brw_vs_surfaces,          /* must do before unit */
   &brw_wm_constant_surface,  /* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,          /* must do before samplers and unit */
   &brw_wm_binding_table,

   &brw_wm_samplers,
   &gen6_sampler_state,

   &gen6_vs_state,
   &gen6_gs_state,
   &gen6_clip_state,
   &gen6_sf_state,
   &gen6_wm_state,

   &gen6_scissor_state,
   &gen6_scissor_state_pointers,

   &brw_state_base_address,

   &gen6_binding_table_pointers,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_drawing_rect,

   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,
};

void brw_init_state( struct brw_context *brw )
{
   const struct brw_tracked_state **atoms;
   int num_atoms;

   brw_init_caches(brw);

   if (brw->intel.gen >= 6) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   while (num_atoms--) {
      assert((*atoms)->dirty.mesa |
             (*atoms)->dirty.brw |
             (*atoms)->dirty.cache);

      if ((*atoms)->prepare)
         brw->prepare_atoms[brw->num_prepare_atoms++] = **atoms;
      if ((*atoms)->emit)
         brw->emit_atoms[brw->num_emit_atoms++] = **atoms;
      atoms++;
   }
   assert(brw->num_emit_atoms <= ARRAY_SIZE(brw->emit_atoms));
   assert(brw->num_prepare_atoms <= ARRAY_SIZE(brw->prepare_atoms));
}
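/* Atoms are split into the two phase lists above: the prepare hooks
 * run from brw_validate_state() so buffer objects can be validated
 * before any batch commands are written, and the emit hooks run from
 * brw_upload_state() to do the actual command and state emission.
 */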


void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
}

/***********************************************************************
 * Helpers for combining and comparing sets of state flags.
 */

static GLuint check_state( const struct brw_state_flags *a,
                           const struct brw_state_flags *b )
{
   return ((a->mesa & b->mesa) |
           (a->brw & b->brw) |
           (a->cache & b->cache)) != 0;
}

static void accumulate_state( struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   a->mesa |= b->mesa;
   a->brw |= b->brw;
   a->cache |= b->cache;
}


static void xor_states( struct brw_state_flags *result,
                        const struct brw_state_flags *a,
                        const struct brw_state_flags *b )
{
   result->mesa = a->mesa ^ b->mesa;
   result->brw = a->brw ^ b->brw;
   result->cache = a->cache ^ b->cache;
}
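/* These helpers back the INTEL_DEBUG ordering check in
 * brw_upload_state(): after each atom emits, the flags it newly set
 * (generated = prev ^ *state) must not overlap the dirty flags already
 * examined for earlier atoms, otherwise the atom list is mis-ordered.
 */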

void
brw_clear_validated_bos(struct brw_context *brw)
{
   int i;

   /* Clear the last round of validated bos */
   for (i = 0; i < brw->state.validated_bo_count; i++) {
      drm_intel_bo_unreference(brw->state.validated_bos[i]);
      brw->state.validated_bos[i] = NULL;
   }
   brw->state.validated_bo_count = 0;
}

struct dirty_bit_map {
   uint32_t bit;
   const char *name;
   uint32_t count;
};

#define DEFINE_BIT(name) {name, #name, 0}
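/* For example, DEFINE_BIT(_NEW_COLOR) expands to
 * {_NEW_COLOR, "_NEW_COLOR", 0}: the bit, its printable name, and a
 * zeroed hit counter.
 */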

static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_PACKUNPACK),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   {0, 0, 0}
};

static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_WM_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_WM_SURFACES),
   DEFINE_BIT(BRW_NEW_BINDING_TABLE),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_DEPTH_BUFFER),
   DEFINE_BIT(BRW_NEW_NR_WM_SURFACES),
   DEFINE_BIT(BRW_NEW_NR_VS_SURFACES),
   DEFINE_BIT(BRW_NEW_VS_CONSTBUF),
   DEFINE_BIT(BRW_NEW_WM_CONSTBUF),
   {0, 0, 0}
};

static struct dirty_bit_map cache_bits[] = {
   DEFINE_BIT(CACHE_NEW_BLEND_STATE),
   DEFINE_BIT(CACHE_NEW_CC_VP),
   DEFINE_BIT(CACHE_NEW_CC_UNIT),
   DEFINE_BIT(CACHE_NEW_WM_PROG),
   DEFINE_BIT(CACHE_NEW_SAMPLER_DEFAULT_COLOR),
   DEFINE_BIT(CACHE_NEW_SAMPLER),
   DEFINE_BIT(CACHE_NEW_WM_UNIT),
   DEFINE_BIT(CACHE_NEW_SF_PROG),
   DEFINE_BIT(CACHE_NEW_SF_VP),
   DEFINE_BIT(CACHE_NEW_SF_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_PROG),
   DEFINE_BIT(CACHE_NEW_GS_UNIT),
   DEFINE_BIT(CACHE_NEW_GS_PROG),
   DEFINE_BIT(CACHE_NEW_CLIP_VP),
   DEFINE_BIT(CACHE_NEW_CLIP_UNIT),
   DEFINE_BIT(CACHE_NEW_CLIP_PROG),
   {0, 0, 0}
};
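/* These tables are consulted only when INTEL_DEBUG & DEBUG_STATE is
 * set: each upload histograms which dirty bits fired, and the counts
 * are dumped every 1000 uploads by brw_print_dirty_count() below.
 */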


static void
brw_update_dirty_count(struct dirty_bit_map *bit_map, uint32_t bits)
{
   int i;

   for (i = 0; i < 32; i++) {
      if (bit_map[i].bit == 0)
         return;

      if (bit_map[i].bit & bits)
         bit_map[i].count++;
   }
}

static void
brw_print_dirty_count(struct dirty_bit_map *bit_map)
{
   int i;

   for (i = 0; i < 32; i++) {
      if (bit_map[i].bit == 0)
         return;

      fprintf(stderr, "0x%08x: %12d (%s)\n",
              bit_map[i].bit, bit_map[i].count, bit_map[i].name);
   }
}

/***********************************************************************
 * Prepare all state: run the prepare hook of every dirty atom so that
 * buffer objects are validated before any commands are emitted.
 */
void brw_validate_state( struct brw_context *brw )
{
   struct gl_context *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   const struct brw_tracked_state *atoms = brw->prepare_atoms;
   int num_atoms = brw->num_prepare_atoms;
   GLuint i;

   brw_clear_validated_bos(brw);

   state->mesa |= brw->intel.NewGLState;
   brw->intel.NewGLState = 0;

   brw_add_validated_bo(brw, intel->batch.bo);

   if (brw->emit_state_always) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   if (brw->fragment_program != ctx->FragmentProgram._Current) {
      brw->fragment_program = ctx->FragmentProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
   }

   if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
   }

   if ((state->mesa | state->cache | state->brw) == 0)
      return;

   brw->intel.Fallback = GL_FALSE; /* boolean, not bitfield */

   /* do prepare stage for all atoms */
   for (i = 0; i < num_atoms; i++) {
      const struct brw_tracked_state *atom = &atoms[i];

      if (check_state(state, &atom->dirty)) {
         atom->prepare(brw);

         if (brw->intel.Fallback)
            break;
      }
   }

   intel_check_front_buffer_rendering(intel);

   /* Make sure that the textures which are referenced by the current
    * brw fragment program are actually present/valid.
    * If this fails, we can experience GPU lock-ups.
    */
   {
      const struct brw_fragment_program *fp;
      fp = brw_fragment_program_const(brw->fragment_program);
      if (fp) {
         assert((fp->tex_units_used & ctx->Texture._EnabledUnits)
                == fp->tex_units_used);
      }
   }
}


void brw_upload_state(struct brw_context *brw)
{
   struct brw_state_flags *state = &brw->state.dirty;
   const struct brw_tracked_state *atoms = brw->emit_atoms;
   int num_atoms = brw->num_emit_atoms;
   int i;
   static int dirty_count = 0;

   brw_clear_validated_bos(brw);

   if (unlikely(INTEL_DEBUG)) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked, to help ensure
       * the state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < num_atoms; i++) {
         const struct brw_tracked_state *atom = &atoms[i];
         struct brw_state_flags generated;

         if (brw->intel.Fallback)
            break;

         if (check_state(state, &atom->dirty)) {
            atom->emit(brw);
         }

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ *state)
          * if (examined & generated)
          *    fail;
          */
         xor_states(&generated, &prev, state);
         assert(!check_state(&examined, &generated));
         prev = *state;
      }
   }
   else {
      for (i = 0; i < num_atoms; i++) {
         const struct brw_tracked_state *atom = &atoms[i];

         if (brw->intel.Fallback)
            break;

         if (check_state(state, &atom->dirty)) {
            atom->emit(brw);
         }
      }
   }

   if (unlikely(INTEL_DEBUG & DEBUG_STATE)) {
      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      brw_update_dirty_count(cache_bits, state->cache);
      if (dirty_count++ % 1000 == 0) {
         brw_print_dirty_count(mesa_bits);
         brw_print_dirty_count(brw_bits);
         brw_print_dirty_count(cache_bits);
         fprintf(stderr, "\n");
      }
   }

   if (!brw->intel.Fallback)
      memset(state, 0, sizeof(*state));
}
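
/* Typical call sequence (a sketch; see brw_draw.c for the real flow):
 *
 *    brw_validate_state(brw);   // prepare pass: builds the validated-BO list
 *    ...                        // caller checks aperture space for those BOs
 *    brw_upload_state(brw);     // emit pass: writes state, clears dirty bits
 */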