i965: Start adding support for the Sandybridge CC unit.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_state_upload.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33
34 #include "brw_context.h"
35 #include "brw_state.h"
36 #include "intel_batchbuffer.h"
37 #include "intel_buffers.h"
38 #include "intel_chipset.h"
39
/* This is used to initialize brw->state.atoms[].  We could use this
 * list directly except for a single atom, brw_constant_buffer, which
 * has a .dirty value which changes according to the parameters of the
 * current fragment and vertex programs, and so cannot be a static
 * value.
 *
 * Order matters: atoms run in list order, and several entries rely on
 * state produced by earlier ones (see the inline comments below).
 */
static const struct brw_tracked_state *gen4_atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_vp,
   &brw_cc_unit,

   &brw_vs_surfaces,		/* must do before unit */
   &brw_wm_constant_surface,	/* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,		/* must do before samplers and unit */
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,		/* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   &brw_constant_buffer
};
104
105 const struct brw_tracked_state *gen6_atoms[] =
106 {
107 &brw_check_fallback,
108
109 #if 0
110 &brw_wm_input_sizes,
111 &brw_vs_prog,
112 &brw_gs_prog,
113 &brw_clip_prog,
114 &brw_sf_prog,
115 &brw_wm_prog,
116
117 /* Once all the programs are done, we know how large urb entry
118 * sizes need to be and can decide if we need to change the urb
119 * layout.
120 */
121 &brw_curbe_offsets,
122 &brw_recalculate_urb_fence,
123
124 &brw_cc_vp,
125
126 #endif
127 &gen6_blend_state, /* must do before cc unit */
128 &gen6_color_calc_state, /* must do before cc unit */
129 &gen6_depth_stencil_state, /* must do before cc unit */
130 &gen6_cc_state_pointers,
131
132 &brw_vs_surfaces, /* must do before unit */
133 &brw_wm_constant_surface, /* must do before wm surfaces/bind bo */
134 &brw_wm_surfaces, /* must do before samplers and unit */
135 #if 0
136 &brw_wm_samplers,
137
138 &brw_wm_unit,
139 &brw_sf_vp,
140 &brw_sf_unit,
141 &brw_vs_unit, /* always required, enabled or not */
142 &brw_clip_unit,
143 &brw_gs_unit,
144
145 /* Command packets:
146 */
147 &brw_invarient_state,
148 &brw_state_base_address,
149
150 &brw_binding_table_pointers,
151 &brw_blend_constant_color,
152 #endif
153
154 &brw_depthbuffer,
155
156 #if 0
157 &brw_polygon_stipple,
158 &brw_polygon_stipple_offset,
159
160 &brw_line_stipple,
161 &brw_aa_line_parameters,
162
163 &brw_psp_urb_cbs,
164
165 &brw_drawing_rect,
166 &brw_indices,
167 &brw_index_buffer,
168 &brw_vertices,
169
170 &brw_constant_buffer
171 #endif
172 };
173
/* Per-context one-time initialization of the state module: sets up the
 * state caches via brw_init_caches().  The atom lists themselves are
 * static tables and need no setup.
 */
void brw_init_state( struct brw_context *brw )
{
   brw_init_caches(brw);
}
178
179
/* Teardown counterpart of brw_init_state(): releases the state caches
 * and the batch cache.
 */
void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
   brw_destroy_batch_cache(brw);
}
185
186 /***********************************************************************
187 */
188
189 static GLboolean check_state( const struct brw_state_flags *a,
190 const struct brw_state_flags *b )
191 {
192 return ((a->mesa & b->mesa) ||
193 (a->brw & b->brw) ||
194 (a->cache & b->cache));
195 }
196
197 static void accumulate_state( struct brw_state_flags *a,
198 const struct brw_state_flags *b )
199 {
200 a->mesa |= b->mesa;
201 a->brw |= b->brw;
202 a->cache |= b->cache;
203 }
204
205
206 static void xor_states( struct brw_state_flags *result,
207 const struct brw_state_flags *a,
208 const struct brw_state_flags *b )
209 {
210 result->mesa = a->mesa ^ b->mesa;
211 result->brw = a->brw ^ b->brw;
212 result->cache = a->cache ^ b->cache;
213 }
214
215 void
216 brw_clear_validated_bos(struct brw_context *brw)
217 {
218 int i;
219
220 /* Clear the last round of validated bos */
221 for (i = 0; i < brw->state.validated_bo_count; i++) {
222 dri_bo_unreference(brw->state.validated_bos[i]);
223 brw->state.validated_bos[i] = NULL;
224 }
225 brw->state.validated_bo_count = 0;
226 }
227
/* Bookkeeping entry for the DEBUG_STATE statistics below: counts how
 * often one dirty-flag bit fires.
 */
struct dirty_bit_map {
   uint32_t bit;		/* the single dirty-flag bit tracked */
   const char *name;		/* stringified bit name; always points at a
				 * string literal (hence const) */
   uint32_t count;		/* number of rounds the bit was seen dirty */
};

/* Build an initializer whose name string is the bit macro itself. */
#define DEFINE_BIT(name) {name, #name, 0}
235
/* Core-Mesa dirty bits tracked by the DEBUG_STATE statistics.  The
 * {0, 0, 0} entry is the table terminator.
 */
static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR_MATRIX),
   DEFINE_BIT(_NEW_ACCUM),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_PACKUNPACK),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   {0, 0, 0}
};
268
/* Driver-internal (BRW_NEW_*) dirty bits tracked by the DEBUG_STATE
 * statistics.  Terminated by the {0, 0, 0} entry.
 */
static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_WM_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_DEPTH_BUFFER),
   {0, 0, 0}
};
287
/* State-cache (CACHE_NEW_*) dirty bits tracked by the DEBUG_STATE
 * statistics.  Terminated by the {0, 0, 0} entry.
 */
static struct dirty_bit_map cache_bits[] = {
   DEFINE_BIT(CACHE_NEW_BLEND_STATE),
   DEFINE_BIT(CACHE_NEW_CC_VP),
   DEFINE_BIT(CACHE_NEW_CC_UNIT),
   DEFINE_BIT(CACHE_NEW_WM_PROG),
   DEFINE_BIT(CACHE_NEW_SAMPLER_DEFAULT_COLOR),
   DEFINE_BIT(CACHE_NEW_SAMPLER),
   DEFINE_BIT(CACHE_NEW_WM_UNIT),
   DEFINE_BIT(CACHE_NEW_SF_PROG),
   DEFINE_BIT(CACHE_NEW_SF_VP),
   DEFINE_BIT(CACHE_NEW_SF_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_PROG),
   DEFINE_BIT(CACHE_NEW_GS_UNIT),
   DEFINE_BIT(CACHE_NEW_GS_PROG),
   DEFINE_BIT(CACHE_NEW_CLIP_VP),
   DEFINE_BIT(CACHE_NEW_CLIP_UNIT),
   DEFINE_BIT(CACHE_NEW_CLIP_PROG),
   DEFINE_BIT(CACHE_NEW_SURFACE),
   DEFINE_BIT(CACHE_NEW_SURF_BIND),
   {0, 0, 0}
};
310
311
312 static void
313 brw_update_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
314 {
315 int i;
316
317 for (i = 0; i < 32; i++) {
318 if (bit_map[i].bit == 0)
319 return;
320
321 if (bit_map[i].bit & bits)
322 bit_map[i].count++;
323 }
324 }
325
326 static void
327 brw_print_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
328 {
329 int i;
330
331 for (i = 0; i < 32; i++) {
332 if (bit_map[i].bit == 0)
333 return;
334
335 fprintf(stderr, "0x%08x: %12d (%s)\n",
336 bit_map[i].bit, bit_map[i].count, bit_map[i].name);
337 }
338 }
339
/***********************************************************************
 * Emit all state:
 */

/**
 * First half of state upload: run the "prepare" phase of every dirty
 * state atom.
 *
 * Folds Mesa's NewGLState flags into the driver's dirty-state tracker,
 * selects the Gen4 or Gen6 atom list, notices fragment/vertex program
 * changes, and calls each dirty atom's ->prepare() hook.  No batch
 * commands are emitted here; that happens in brw_upload_state().
 */
void brw_validate_state( struct brw_context *brw )
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   brw_clear_validated_bos(brw);

   /* Pick up state changes Mesa core has flagged since the last draw. */
   state->mesa |= brw->intel.NewGLState;
   brw->intel.NewGLState = 0;

   /* The batchbuffer itself is always part of the validation list. */
   brw_add_validated_bo(brw, intel->batch->buf);

   if (IS_GEN6(intel->intelScreen->deviceID)) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   /* Debug knob: force every atom to re-run each time. */
   if (brw->emit_state_always) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   if (brw->fragment_program != ctx->FragmentProgram._Current) {
      brw->fragment_program = ctx->FragmentProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
   }

   if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
   }

   /* Nothing dirty at all: skip the whole prepare pass. */
   if (state->mesa == 0 &&
       state->cache == 0 &&
       state->brw == 0)
      return;

   if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
      brw_clear_batch_cache(brw);

   brw->intel.Fallback = GL_FALSE; /* boolean, not bitfield */

   /* do prepare stage for all atoms */
   for (i = 0; i < num_atoms; i++) {
      const struct brw_tracked_state *atom = atoms[i];

      /* An atom may request software fallback; stop preparing then. */
      if (brw->intel.Fallback)
	 break;

      if (check_state(state, &atom->dirty)) {
	 if (atom->prepare) {
	    atom->prepare(brw);
	 }
      }
   }

   intel_check_front_buffer_rendering(intel);

   /* Make sure that the textures which are referenced by the current
    * brw fragment program are actually present/valid.
    * If this fails, we can experience GPU lock-ups.
    */
   {
      const struct brw_fragment_program *fp;
      fp = brw_fragment_program_const(brw->fragment_program);
      if (fp) {
         assert((fp->tex_units_used & ctx->Texture._EnabledUnits)
                == fp->tex_units_used);
      }
   }
}
422
423
/**
 * Second half of state upload: call the ->emit() hook of every state
 * atom whose dirty bits intersect the accumulated dirty state, then
 * clear the dirty flags (unless we fell back to software rendering).
 *
 * With INTEL_DEBUG set, additionally cross-checks atom ordering: no
 * atom may dirty state that an earlier atom in the list has already
 * examined.  With DEBUG_STATE, keeps statistics on which dirty bits
 * fire and dumps them every 1000 rounds.
 */
void brw_upload_state(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   int i;
   static int dirty_count = 0;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   /* Same list selection as brw_validate_state(). */
   if (IS_GEN6(intel->intelScreen->deviceID)) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   brw_clear_validated_bos(brw);

   if (INTEL_DEBUG) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];
	 struct brw_state_flags generated;

	 /* Every atom must declare at least one dirty bit. */
	 assert(atom->dirty.mesa ||
		atom->dirty.brw ||
		atom->dirty.cache);

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }

	 /* Track everything the atoms so far have looked at... */
	 accumulate_state(&examined, &atom->dirty);

	 /* generated = (prev ^ state)
	  * if (examined & generated)
	  *     fail;
	  */
	 xor_states(&generated, &prev, state);
	 assert(!check_state(&examined, &generated));
	 prev = *state;
      }
   }
   else {
      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }
      }
   }

   if (INTEL_DEBUG & DEBUG_STATE) {
      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      brw_update_dirty_count(cache_bits, state->cache);
      if (dirty_count++ % 1000 == 0) {
	 brw_print_dirty_count(mesa_bits, state->mesa);
	 brw_print_dirty_count(brw_bits, state->brw);
	 brw_print_dirty_count(cache_bits, state->cache);
	 fprintf(stderr, "\n");
      }
   }

   /* Keep the dirty flags so the atoms re-run after a fallback frame. */
   if (!brw->intel.Fallback)
      memset(state, 0, sizeof(*state));
}