i965: Add Sandybridge viewport setup.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_state_upload.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33
34 #include "brw_context.h"
35 #include "brw_state.h"
36 #include "intel_batchbuffer.h"
37 #include "intel_buffers.h"
38 #include "intel_chipset.h"
39
/* This is used to initialize brw->state.atoms[].  We could use this
 * list directly except for a single atom, brw_constant_buffer, which
 * has a .dirty value which changes according to the parameters of the
 * current fragment and vertex programs, and so cannot be a static
 * value.
 *
 * Ordering is significant: brw_validate_state()/brw_upload_state()
 * walk this table front to back, so an atom must appear after every
 * atom whose output it consumes (see the inline notes below).
 */
static const struct brw_tracked_state *gen4_atoms[] =
{
   &brw_check_fallback,

   /* Program compilation first: later stages size the URB and unit
    * state from the compiled program outputs.
    */
   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_vp,
   &brw_cc_unit,

   &brw_vs_surfaces,		/* must do before unit */
   &brw_wm_constant_surface,	/* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,		/* must do before samplers and unit */
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,		/* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   /* Draw-call state last: indices/vertices reference everything above. */
   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   &brw_constant_buffer
};
104
/* State atom list for Sandybridge (gen6).  Same walk order rules as
 * gen4_atoms; the #if 0 sections are gen4 atoms whose gen6
 * replacements have not been brought up yet.
 *
 * NOTE(review): unlike gen4_atoms this table is neither static nor
 * const — confirm no other translation unit references it before
 * tightening the linkage.
 */
const struct brw_tracked_state *gen6_atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
#if 0
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,


#endif
   &gen6_clip_vp,
   &gen6_sf_vp,
   &gen6_cc_vp,
   &gen6_viewport_state,	/* must do after *_vp stages */

   &gen6_urb,
   &gen6_blend_state,		/* must do before cc unit */
   &gen6_color_calc_state,	/* must do before cc unit */
   &gen6_depth_stencil_state,	/* must do before cc unit */
   &gen6_cc_state_pointers,

   &brw_vs_surfaces,		/* must do before unit */
   &brw_wm_constant_surface,	/* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,		/* must do before samplers and unit */

   &gen6_vs_state,
   &gen6_gs_state,
   &gen6_clip_state,
#if 0
   &brw_wm_samplers,

   &brw_wm_unit,
#endif
   &gen6_scissor_state,

#if 0
   &brw_sf_unit,

   /* Command packets:
    */
   &brw_invarient_state,
#endif

   &brw_state_base_address,

#if 0
   &brw_binding_table_pointers,
   &brw_blend_constant_color,
#endif

   &brw_depthbuffer,

#if 0
   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,
#endif

   &brw_drawing_rect,

   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,
};
182
/* Per-context state-tracker initialization: currently just sets up
 * the program/unit state caches.
 */
void brw_init_state( struct brw_context *brw )
{
   brw_init_caches(brw);
}
187
188
/* Tear down what brw_init_state() created, plus the per-batch cache
 * that accumulates during rendering.
 */
void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
   brw_destroy_batch_cache(brw);
}
194
195 /***********************************************************************
196 */
197
198 static GLboolean check_state( const struct brw_state_flags *a,
199 const struct brw_state_flags *b )
200 {
201 return ((a->mesa & b->mesa) ||
202 (a->brw & b->brw) ||
203 (a->cache & b->cache));
204 }
205
206 static void accumulate_state( struct brw_state_flags *a,
207 const struct brw_state_flags *b )
208 {
209 a->mesa |= b->mesa;
210 a->brw |= b->brw;
211 a->cache |= b->cache;
212 }
213
214
215 static void xor_states( struct brw_state_flags *result,
216 const struct brw_state_flags *a,
217 const struct brw_state_flags *b )
218 {
219 result->mesa = a->mesa ^ b->mesa;
220 result->brw = a->brw ^ b->brw;
221 result->cache = a->cache ^ b->cache;
222 }
223
224 void
225 brw_clear_validated_bos(struct brw_context *brw)
226 {
227 int i;
228
229 /* Clear the last round of validated bos */
230 for (i = 0; i < brw->state.validated_bo_count; i++) {
231 dri_bo_unreference(brw->state.validated_bos[i]);
232 brw->state.validated_bos[i] = NULL;
233 }
234 brw->state.validated_bo_count = 0;
235 }
236
/* One row of the DEBUG_STATE statistics tables below: a dirty flag,
 * its stringified name, and how many uploads saw it set.
 */
struct dirty_bit_map {
   uint32_t bit;        /* the dirty-flag bit this entry tracks */
   const char *name;    /* stringified flag name; points at a string
                         * literal, hence const (writing through a
                         * literal is undefined behavior) */
   uint32_t count;      /* number of times the bit was seen dirty */
};

/* Build a table row from a flag macro, stringifying its name. */
#define DEFINE_BIT(name) {name, #name, 0}
244
/* Statistics table for core-Mesa (_NEW_*) dirty flags; terminated by
 * the all-zero sentinel that brw_update_dirty_count() stops on.
 */
static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR_MATRIX),
   DEFINE_BIT(_NEW_ACCUM),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_PACKUNPACK),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   {0, 0, 0}
};
277
/* Statistics table for driver-internal (BRW_NEW_*) dirty flags;
 * zero-sentinel terminated.
 */
static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_WM_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_DEPTH_BUFFER),
   {0, 0, 0}
};
296
/* Statistics table for state-cache (CACHE_NEW_*) dirty flags;
 * zero-sentinel terminated.
 */
static struct dirty_bit_map cache_bits[] = {
   DEFINE_BIT(CACHE_NEW_BLEND_STATE),
   DEFINE_BIT(CACHE_NEW_CC_VP),
   DEFINE_BIT(CACHE_NEW_CC_UNIT),
   DEFINE_BIT(CACHE_NEW_WM_PROG),
   DEFINE_BIT(CACHE_NEW_SAMPLER_DEFAULT_COLOR),
   DEFINE_BIT(CACHE_NEW_SAMPLER),
   DEFINE_BIT(CACHE_NEW_WM_UNIT),
   DEFINE_BIT(CACHE_NEW_SF_PROG),
   DEFINE_BIT(CACHE_NEW_SF_VP),
   DEFINE_BIT(CACHE_NEW_SF_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_PROG),
   DEFINE_BIT(CACHE_NEW_GS_UNIT),
   DEFINE_BIT(CACHE_NEW_GS_PROG),
   DEFINE_BIT(CACHE_NEW_CLIP_VP),
   DEFINE_BIT(CACHE_NEW_CLIP_UNIT),
   DEFINE_BIT(CACHE_NEW_CLIP_PROG),
   DEFINE_BIT(CACHE_NEW_SURFACE),
   DEFINE_BIT(CACHE_NEW_SURF_BIND),
   {0, 0, 0}
};
319
320
321 static void
322 brw_update_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
323 {
324 int i;
325
326 for (i = 0; i < 32; i++) {
327 if (bit_map[i].bit == 0)
328 return;
329
330 if (bit_map[i].bit & bits)
331 bit_map[i].count++;
332 }
333 }
334
335 static void
336 brw_print_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
337 {
338 int i;
339
340 for (i = 0; i < 32; i++) {
341 if (bit_map[i].bit == 0)
342 return;
343
344 fprintf(stderr, "0x%08x: %12d (%s)\n",
345 bit_map[i].bit, bit_map[i].count, bit_map[i].name);
346 }
347 }
348
/***********************************************************************
 * Emit all state:
 */

/**
 * Run the "prepare" phase: fold Mesa's accumulated state changes into
 * the driver dirty flags, pick the atom list for the hardware
 * generation, and call each dirty atom's prepare() hook so it can
 * calculate state and reference buffer objects before any batch
 * commands are emitted (brw_upload_state() does the emit phase).
 */
void brw_validate_state( struct brw_context *brw )
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   /* Drop BO references from the previous validation round. */
   brw_clear_validated_bos(brw);

   /* Pull in core Mesa's pending state-change flags. */
   state->mesa |= brw->intel.NewGLState;
   brw->intel.NewGLState = 0;

   brw_add_validated_bo(brw, intel->batch->buf);

   /* Select the atom list for this hardware generation; must match
    * the selection in brw_upload_state().
    */
   if (IS_GEN6(intel->intelScreen->deviceID)) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   /* Debug/bring-up mode: force every atom to run. */
   if (brw->emit_state_always) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   /* Track program changes ourselves: Mesa's _NEW_PROGRAM doesn't
    * distinguish which program object changed.
    */
   if (brw->fragment_program != ctx->FragmentProgram._Current) {
      brw->fragment_program = ctx->FragmentProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
   }

   if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
   }

   /* Nothing dirty — nothing to prepare. */
   if (state->mesa == 0 &&
       state->cache == 0 &&
       state->brw == 0)
      return;

   if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
      brw_clear_batch_cache(brw);

   brw->intel.Fallback = GL_FALSE; /* boolean, not bitfield */

   /* do prepare stage for all atoms */
   for (i = 0; i < num_atoms; i++) {
      const struct brw_tracked_state *atom = atoms[i];

      /* An atom's prepare() may trigger software fallback; stop early. */
      if (brw->intel.Fallback)
	 break;

      if (check_state(state, &atom->dirty)) {
	 if (atom->prepare) {
	    atom->prepare(brw);
	 }
      }
   }

   intel_check_front_buffer_rendering(intel);

   /* Make sure that the textures which are referenced by the current
    * brw fragment program are actually present/valid.
    * If this fails, we can experience GPU lock-ups.
    */
   {
      const struct brw_fragment_program *fp;
      fp = brw_fragment_program_const(brw->fragment_program);
      if (fp) {
	 assert((fp->tex_units_used & ctx->Texture._EnabledUnits)
		== fp->tex_units_used);
      }
   }
}
431
432
/**
 * Run the "emit" phase: call every dirty atom's emit() hook, in table
 * order, to write state and commands into the batchbuffer.  Must be
 * preceded by brw_validate_state() with no intervening state change.
 * Clears the dirty flags on success (i.e. when no fallback occurred).
 */
void brw_upload_state(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   int i;
   static int dirty_count = 0;   /* uploads since start, for DEBUG_STATE sampling */
   const struct brw_tracked_state **atoms;
   int num_atoms;

   /* Must select the same atom list brw_validate_state() prepared. */
   if (IS_GEN6(intel->intelScreen->deviceID)) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   brw_clear_validated_bos(brw);

   if (INTEL_DEBUG) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];
	 struct brw_state_flags generated;

	 /* An atom that listens to no flags would never run. */
	 assert(atom->dirty.mesa ||
		atom->dirty.brw ||
		atom->dirty.cache);

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }

	 accumulate_state(&examined, &atom->dirty);

	 /* generated = (prev ^ state)
	  * if (examined & generated)
	  *     fail;
	  *
	  * i.e. no atom may set a dirty flag that an earlier atom in
	  * the list already consumed — that would be an ordering bug.
	  */
	 xor_states(&generated, &prev, state);
	 assert(!check_state(&examined, &generated));
	 prev = *state;
      }
   }
   else {
      /* Fast path: just emit each dirty atom in order. */
      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }
      }
   }

   /* Accumulate per-flag statistics; print a summary every 1000 uploads. */
   if (INTEL_DEBUG & DEBUG_STATE) {
      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      brw_update_dirty_count(cache_bits, state->cache);
      if (dirty_count++ % 1000 == 0) {
	 brw_print_dirty_count(mesa_bits, state->mesa);
	 brw_print_dirty_count(brw_bits, state->brw);
	 brw_print_dirty_count(cache_bits, state->cache);
	 fprintf(stderr, "\n");
      }
   }

   /* On fallback, keep the flags dirty so the software path (and the
    * next hardware attempt) still sees them.
    */
   if (!brw->intel.Fallback)
      memset(state, 0, sizeof(*state));
}