i965: Set the state base address on Sandybridge.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_state_upload.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33
34 #include "brw_context.h"
35 #include "brw_state.h"
36 #include "intel_batchbuffer.h"
37 #include "intel_buffers.h"
38 #include "intel_chipset.h"
39
40 /* This is used to initialize brw->state.atoms[]. We could use this
41 * list directly except for a single atom, brw_constant_buffer, which
42 * has a .dirty value which changes according to the parameters of the
43 * current fragment and vertex programs, and so cannot be a static
44 * value.
45 */
/* Atom list for pre-Sandybridge hardware (selected in
 * brw_validate_state()/brw_upload_state() whenever !IS_GEN6()).
 * Ordering is significant: atoms earlier in the list produce state
 * that later atoms consume -- see the inline ordering notes.
 */
static const struct brw_tracked_state *gen4_atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_vp,
   &brw_cc_unit,

   &brw_vs_surfaces,		/* must do before unit */
   &brw_wm_constant_surface,	/* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,		/* must do before samplers and unit */
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,		/* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   &brw_constant_buffer
};
104
/* Atom list for Sandybridge (gen6).  The #if 0 sections hold gen4
 * atoms that have not yet been ported to the gen6 state layout.
 * NOTE(review): unlike gen4_atoms this array is not static -- confirm
 * whether any other file references it before narrowing the linkage.
 */
const struct brw_tracked_state *gen6_atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
#if 0
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,

   &brw_cc_vp,

#endif
   &gen6_urb,
   &gen6_blend_state,		/* must do before cc unit */
   &gen6_color_calc_state,	/* must do before cc unit */
   &gen6_depth_stencil_state,	/* must do before cc unit */
   &gen6_cc_state_pointers,

   &brw_vs_surfaces,		/* must do before unit */
   &brw_wm_constant_surface,	/* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,		/* must do before samplers and unit */

   &gen6_vs_state,
   &gen6_gs_state,
   &gen6_clip_state,
#if 0
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,

   /* Command packets:
    */
   &brw_invarient_state,
#endif

   &brw_state_base_address,

#if 0
   &brw_binding_table_pointers,
   &brw_blend_constant_color,
#endif

   &brw_depthbuffer,

#if 0
   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
#endif

   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,
};
175
/* One-time state-tracker setup for a context; currently this amounts
 * to priming the program/state caches.
 */
void brw_init_state( struct brw_context *brw )
{
   brw_init_caches(brw);
}
180
181
/* Tear-down counterpart to brw_init_state(): releases the program
 * caches and the batch cache.
 */
void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
   brw_destroy_batch_cache(brw);
}
187
188 /***********************************************************************
189 */
190
191 static GLboolean check_state( const struct brw_state_flags *a,
192 const struct brw_state_flags *b )
193 {
194 return ((a->mesa & b->mesa) ||
195 (a->brw & b->brw) ||
196 (a->cache & b->cache));
197 }
198
199 static void accumulate_state( struct brw_state_flags *a,
200 const struct brw_state_flags *b )
201 {
202 a->mesa |= b->mesa;
203 a->brw |= b->brw;
204 a->cache |= b->cache;
205 }
206
207
208 static void xor_states( struct brw_state_flags *result,
209 const struct brw_state_flags *a,
210 const struct brw_state_flags *b )
211 {
212 result->mesa = a->mesa ^ b->mesa;
213 result->brw = a->brw ^ b->brw;
214 result->cache = a->cache ^ b->cache;
215 }
216
217 void
218 brw_clear_validated_bos(struct brw_context *brw)
219 {
220 int i;
221
222 /* Clear the last round of validated bos */
223 for (i = 0; i < brw->state.validated_bo_count; i++) {
224 dri_bo_unreference(brw->state.validated_bos[i]);
225 brw->state.validated_bos[i] = NULL;
226 }
227 brw->state.validated_bo_count = 0;
228 }
229
/* Bookkeeping for the DEBUG_STATE statistics: one entry per dirty
 * flag, counting how often that flag was set at upload time.
 */
struct dirty_bit_map {
   uint32_t bit;		/* the single dirty-flag bit tracked */
   const char *name;		/* stringified flag name, for printing;
				 * const: entries point at string literals */
   uint32_t count;		/* uploads in which the bit was set */
};

/* Expands to an initializer { FLAG, "FLAG", 0 } for the tables below. */
#define DEFINE_BIT(name) {name, #name, 0}
237
/* Name table for Mesa core _NEW_* dirty bits, consumed by the
 * DEBUG_STATE statistics below.  Terminated by a zero sentinel.
 */
static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR_MATRIX),
   DEFINE_BIT(_NEW_ACCUM),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_PACKUNPACK),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   {0, 0, 0}
};
270
/* Name table for driver-internal BRW_NEW_* dirty bits (DEBUG_STATE
 * statistics).  Terminated by a zero sentinel.
 */
static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_WM_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_DEPTH_BUFFER),
   {0, 0, 0}
};
289
/* Name table for CACHE_NEW_* dirty bits produced by the state cache
 * (DEBUG_STATE statistics).  Terminated by a zero sentinel.
 */
static struct dirty_bit_map cache_bits[] = {
   DEFINE_BIT(CACHE_NEW_BLEND_STATE),
   DEFINE_BIT(CACHE_NEW_CC_VP),
   DEFINE_BIT(CACHE_NEW_CC_UNIT),
   DEFINE_BIT(CACHE_NEW_WM_PROG),
   DEFINE_BIT(CACHE_NEW_SAMPLER_DEFAULT_COLOR),
   DEFINE_BIT(CACHE_NEW_SAMPLER),
   DEFINE_BIT(CACHE_NEW_WM_UNIT),
   DEFINE_BIT(CACHE_NEW_SF_PROG),
   DEFINE_BIT(CACHE_NEW_SF_VP),
   DEFINE_BIT(CACHE_NEW_SF_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_PROG),
   DEFINE_BIT(CACHE_NEW_GS_UNIT),
   DEFINE_BIT(CACHE_NEW_GS_PROG),
   DEFINE_BIT(CACHE_NEW_CLIP_VP),
   DEFINE_BIT(CACHE_NEW_CLIP_UNIT),
   DEFINE_BIT(CACHE_NEW_CLIP_PROG),
   DEFINE_BIT(CACHE_NEW_SURFACE),
   DEFINE_BIT(CACHE_NEW_SURF_BIND),
   {0, 0, 0}
};
312
313
314 static void
315 brw_update_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
316 {
317 int i;
318
319 for (i = 0; i < 32; i++) {
320 if (bit_map[i].bit == 0)
321 return;
322
323 if (bit_map[i].bit & bits)
324 bit_map[i].count++;
325 }
326 }
327
328 static void
329 brw_print_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
330 {
331 int i;
332
333 for (i = 0; i < 32; i++) {
334 if (bit_map[i].bit == 0)
335 return;
336
337 fprintf(stderr, "0x%08x: %12d (%s)\n",
338 bit_map[i].bit, bit_map[i].count, bit_map[i].name);
339 }
340 }
341
342 /***********************************************************************
343 * Emit all state:
344 */
/* First half of the per-draw state update: fold newly-flagged Mesa
 * state into the dirty set, pick the atom list for the running
 * hardware generation, and run the prepare() stage of every atom
 * whose dirty bits intersect the accumulated dirty state.  The
 * matching emit() stage runs later, in brw_upload_state().
 */
void brw_validate_state( struct brw_context *brw )
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   brw_clear_validated_bos(brw);

   /* Fold in the state changes Mesa core has flagged since the last
    * draw, then acknowledge them.
    */
   state->mesa |= brw->intel.NewGLState;
   brw->intel.NewGLState = 0;

   brw_add_validated_bo(brw, intel->batch->buf);

   /* Sandybridge uses a different atom list than gen4/gen5. */
   if (IS_GEN6(intel->intelScreen->deviceID)) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   /* Debug/bring-up path: treat everything as dirty every draw. */
   if (brw->emit_state_always) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   if (brw->fragment_program != ctx->FragmentProgram._Current) {
      brw->fragment_program = ctx->FragmentProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
   }

   if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
   }

   /* Nothing dirty at all: skip the prepare pass entirely. */
   if (state->mesa == 0 &&
       state->cache == 0 &&
       state->brw == 0)
      return;

   if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
      brw_clear_batch_cache(brw);

   brw->intel.Fallback = GL_FALSE; /* boolean, not bitfield */

   /* do prepare stage for all atoms */
   for (i = 0; i < num_atoms; i++) {
      const struct brw_tracked_state *atom = atoms[i];

      /* A prepare() may have requested a software fallback; stop early. */
      if (brw->intel.Fallback)
	 break;

      if (check_state(state, &atom->dirty)) {
	 if (atom->prepare) {
	    atom->prepare(brw);
	 }
      }
   }

   intel_check_front_buffer_rendering(intel);

   /* Make sure that the textures which are referenced by the current
    * brw fragment program are actually present/valid.
    * If this fails, we can experience GPU lock-ups.
    */
   {
      const struct brw_fragment_program *fp;
      fp = brw_fragment_program_const(brw->fragment_program);
      if (fp) {
	 assert((fp->tex_units_used & ctx->Texture._EnabledUnits)
		== fp->tex_units_used);
      }
   }
}
424
425
/* Second half of the per-draw state update: run the emit() stage of
 * every atom whose dirty bits intersect the accumulated dirty state,
 * then clear the dirty flags (unless a fallback occurred).  With
 * INTEL_DEBUG set, additionally cross-checks that no atom dirties
 * state which an earlier atom already examined, i.e. that the atom
 * list ordering is correct.
 */
void brw_upload_state(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   int i;
   static int dirty_count = 0;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   /* Same generation split as brw_validate_state(). */
   if (IS_GEN6(intel->intelScreen->deviceID)) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   brw_clear_validated_bos(brw);

   if (INTEL_DEBUG) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];
	 struct brw_state_flags generated;

	 /* Every atom must subscribe to at least one dirty flag. */
	 assert(atom->dirty.mesa ||
		atom->dirty.brw ||
		atom->dirty.cache);

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }

	 accumulate_state(&examined, &atom->dirty);

	 /* generated = (prev ^ state)
	  * if (examined & generated)
	  *     fail;
	  */
	 xor_states(&generated, &prev, state);
	 assert(!check_state(&examined, &generated));
	 prev = *state;
      }
   }
   else {
      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }
      }
   }

   if (INTEL_DEBUG & DEBUG_STATE) {
      /* Accumulate how often each dirty bit triggers, dumping the
       * totals every 1000 uploads.
       */
      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      brw_update_dirty_count(cache_bits, state->cache);
      if (dirty_count++ % 1000 == 0) {
	 brw_print_dirty_count(mesa_bits, state->mesa);
	 brw_print_dirty_count(brw_bits, state->brw);
	 brw_print_dirty_count(cache_bits, state->cache);
	 fprintf(stderr, "\n");
      }
   }

   /* On fallback, keep the dirty flags so the state is re-emitted
    * once we return from the software path.
    */
   if (!brw->intel.Fallback)
      memset(state, 0, sizeof(*state));
}