i965: Enable DRAWING_RECTANGLE emit on Sandybridge.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_state_upload.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33
34 #include "brw_context.h"
35 #include "brw_state.h"
36 #include "intel_batchbuffer.h"
37 #include "intel_buffers.h"
38 #include "intel_chipset.h"
39
/* This is used to initialize brw->state.atoms[].  We could use this
 * list directly except for a single atom, brw_constant_buffer, which
 * has a .dirty value which changes according to the parameters of the
 * current fragment and vertex programs, and so cannot be a static
 * value.
 *
 * Ordering is significant: prepare()/emit() run in list order, and
 * several atoms consume state produced by earlier ones (see the
 * "must do before" notes below).
 */
static const struct brw_tracked_state *gen4_atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_vp,
   &brw_cc_unit,

   &brw_vs_surfaces,		/* must do before unit */
   &brw_wm_constant_surface,	/* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,		/* must do before samplers and unit */
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,		/* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,	/* [sic] -- name defined elsewhere */
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   &brw_constant_buffer
};
104
/* State atom list for Sandybridge (Gen6).  Same ordering rules as
 * gen4_atoms.  The #if 0 sections are pre-gen6 atoms that have not
 * yet been ported to (or replaced by) gen6 equivalents.
 *
 * NOTE(review): unlike gen4_atoms this table is not declared
 * `static const` -- presumably an oversight; confirm no other file
 * references it before tightening the linkage.
 */
const struct brw_tracked_state *gen6_atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
#if 0
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,

   &brw_cc_vp,

#endif
   &gen6_urb,
   &gen6_blend_state,		/* must do before cc unit */
   &gen6_color_calc_state,	/* must do before cc unit */
   &gen6_depth_stencil_state,	/* must do before cc unit */
   &gen6_cc_state_pointers,

   &brw_vs_surfaces,		/* must do before unit */
   &brw_wm_constant_surface,	/* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,		/* must do before samplers and unit */

   &gen6_vs_state,
   &gen6_gs_state,
   &gen6_clip_state,
#if 0
   &brw_wm_samplers,

   &brw_wm_unit,
#endif
   &gen6_scissor_state,
#if 0
   &brw_sf_vp,
   &brw_sf_unit,

   /* Command packets:
    */
   &brw_invarient_state,
#endif

   &brw_state_base_address,

#if 0
   &brw_binding_table_pointers,
   &brw_blend_constant_color,
#endif

   &brw_depthbuffer,

#if 0
   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,
#endif

   &brw_drawing_rect,

   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,
};
178
/**
 * Per-context initialization of the state-upload machinery: sets up
 * the caches used to reuse previously-uploaded hardware state objects.
 */
void brw_init_state( struct brw_context *brw )
{
   brw_init_caches(brw);
}
183
184
/**
 * Per-context teardown: releases the state caches and the batch-cache
 * bookkeeping created by brw_init_state()/state emission.
 */
void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
   brw_destroy_batch_cache(brw);
}
190
191 /***********************************************************************
192 */
193
194 static GLboolean check_state( const struct brw_state_flags *a,
195 const struct brw_state_flags *b )
196 {
197 return ((a->mesa & b->mesa) ||
198 (a->brw & b->brw) ||
199 (a->cache & b->cache));
200 }
201
202 static void accumulate_state( struct brw_state_flags *a,
203 const struct brw_state_flags *b )
204 {
205 a->mesa |= b->mesa;
206 a->brw |= b->brw;
207 a->cache |= b->cache;
208 }
209
210
211 static void xor_states( struct brw_state_flags *result,
212 const struct brw_state_flags *a,
213 const struct brw_state_flags *b )
214 {
215 result->mesa = a->mesa ^ b->mesa;
216 result->brw = a->brw ^ b->brw;
217 result->cache = a->cache ^ b->cache;
218 }
219
220 void
221 brw_clear_validated_bos(struct brw_context *brw)
222 {
223 int i;
224
225 /* Clear the last round of validated bos */
226 for (i = 0; i < brw->state.validated_bo_count; i++) {
227 dri_bo_unreference(brw->state.validated_bos[i]);
228 brw->state.validated_bos[i] = NULL;
229 }
230 brw->state.validated_bo_count = 0;
231 }
232
/* One entry of the INTEL_DEBUG=state statistics tables: a dirty bit,
 * its printable name, and how many times it has been seen set.
 */
struct dirty_bit_map {
   uint32_t bit;	/* dirty flag this entry tracks */
   const char *name;	/* flag name; points at a string literal, so must
			 * be const-qualified (writing it would be UB) */
   uint32_t count;	/* number of uploads in which the flag was dirty */
};

/* Build an entry whose name is the stringized flag expression. */
#define DEFINE_BIT(name) {name, #name, 0}
240
/* Names/counters for the core Mesa _NEW_* dirty bits, used by the
 * INTEL_DEBUG=state statistics output.  Terminated by a zero sentinel.
 */
static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR_MATRIX),
   DEFINE_BIT(_NEW_ACCUM),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_PACKUNPACK),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   {0, 0, 0}
};
273
/* Names/counters for the driver-internal BRW_NEW_* dirty bits.
 * Terminated by a zero sentinel.
 */
static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_WM_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_DEPTH_BUFFER),
   {0, 0, 0}
};
292
/* Names/counters for the CACHE_NEW_* dirty bits (state-cache results).
 * Terminated by a zero sentinel.
 */
static struct dirty_bit_map cache_bits[] = {
   DEFINE_BIT(CACHE_NEW_BLEND_STATE),
   DEFINE_BIT(CACHE_NEW_CC_VP),
   DEFINE_BIT(CACHE_NEW_CC_UNIT),
   DEFINE_BIT(CACHE_NEW_WM_PROG),
   DEFINE_BIT(CACHE_NEW_SAMPLER_DEFAULT_COLOR),
   DEFINE_BIT(CACHE_NEW_SAMPLER),
   DEFINE_BIT(CACHE_NEW_WM_UNIT),
   DEFINE_BIT(CACHE_NEW_SF_PROG),
   DEFINE_BIT(CACHE_NEW_SF_VP),
   DEFINE_BIT(CACHE_NEW_SF_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_PROG),
   DEFINE_BIT(CACHE_NEW_GS_UNIT),
   DEFINE_BIT(CACHE_NEW_GS_PROG),
   DEFINE_BIT(CACHE_NEW_CLIP_VP),
   DEFINE_BIT(CACHE_NEW_CLIP_UNIT),
   DEFINE_BIT(CACHE_NEW_CLIP_PROG),
   DEFINE_BIT(CACHE_NEW_SURFACE),
   DEFINE_BIT(CACHE_NEW_SURF_BIND),
   {0, 0, 0}
};
315
316
317 static void
318 brw_update_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
319 {
320 int i;
321
322 for (i = 0; i < 32; i++) {
323 if (bit_map[i].bit == 0)
324 return;
325
326 if (bit_map[i].bit & bits)
327 bit_map[i].count++;
328 }
329 }
330
331 static void
332 brw_print_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
333 {
334 int i;
335
336 for (i = 0; i < 32; i++) {
337 if (bit_map[i].bit == 0)
338 return;
339
340 fprintf(stderr, "0x%08x: %12d (%s)\n",
341 bit_map[i].bit, bit_map[i].count, bit_map[i].name);
342 }
343 }
344
/***********************************************************************
 * Validate (prepare stage) all state atoms for the current draw.
 * Runs each atom's prepare() hook in list order; the actual command
 * emission happens later in brw_upload_state().
 */
void brw_validate_state( struct brw_context *brw )
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   brw_clear_validated_bos(brw);

   /* Fold Mesa's accumulated new-state bits into our dirty tracking. */
   state->mesa |= brw->intel.NewGLState;
   brw->intel.NewGLState = 0;

   brw_add_validated_bo(brw, intel->batch->buf);

   /* Select the atom list for this chip generation. */
   if (IS_GEN6(intel->intelScreen->deviceID)) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   /* Debug/driconf option: treat everything as dirty every time. */
   if (brw->emit_state_always) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   if (brw->fragment_program != ctx->FragmentProgram._Current) {
      brw->fragment_program = ctx->FragmentProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
   }

   if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
   }

   /* Nothing dirty at all: skip the prepare pass entirely. */
   if (state->mesa == 0 &&
       state->cache == 0 &&
       state->brw == 0)
      return;

   if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
      brw_clear_batch_cache(brw);

   brw->intel.Fallback = GL_FALSE; /* boolean, not bitfield */

   /* do prepare stage for all atoms; a prepare() may raise Fallback,
    * which aborts the rest of the pass.
    */
   for (i = 0; i < num_atoms; i++) {
      const struct brw_tracked_state *atom = atoms[i];

      if (brw->intel.Fallback)
	 break;

      if (check_state(state, &atom->dirty)) {
	 if (atom->prepare) {
	    atom->prepare(brw);
	 }
      }
   }

   intel_check_front_buffer_rendering(intel);

   /* Make sure that the textures which are referenced by the current
    * brw fragment program are actually present/valid.
    * If this fails, we can experience GPU lock-ups.
    */
   {
      const struct brw_fragment_program *fp;
      fp = brw_fragment_program_const(brw->fragment_program);
      if (fp) {
	 assert((fp->tex_units_used & ctx->Texture._EnabledUnits)
		== fp->tex_units_used);
      }
   }
}
427
428
/**
 * Emit stage: run each dirty atom's emit() hook in list order,
 * writing the hardware commands into the batchbuffer.  Must follow a
 * brw_validate_state() call for the same draw.
 *
 * Under INTEL_DEBUG a slower path additionally verifies that no atom
 * generates a dirty bit that an earlier (already-examined) atom
 * depends on, i.e. that the atom list ordering is consistent.
 */
void brw_upload_state(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   int i;
   static int dirty_count = 0;	/* draws since last stats printout */
   const struct brw_tracked_state **atoms;
   int num_atoms;

   /* Same generation selection as brw_validate_state(). */
   if (IS_GEN6(intel->intelScreen->deviceID)) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   brw_clear_validated_bos(brw);

   if (INTEL_DEBUG) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];
	 struct brw_state_flags generated;

	 /* Every atom must watch at least one dirty bit. */
	 assert(atom->dirty.mesa ||
		atom->dirty.brw ||
		atom->dirty.cache);

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }

	 accumulate_state(&examined, &atom->dirty);

	 /* generated = (prev ^ state)
	  * if (examined & generated)
	  *     fail;
	  */
	 xor_states(&generated, &prev, state);
	 assert(!check_state(&examined, &generated));
	 prev = *state;
      }
   }
   else {
      /* Fast path: just emit every atom whose watched bits are dirty. */
      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }
      }
   }

   if (INTEL_DEBUG & DEBUG_STATE) {
      /* Accumulate per-bit statistics and dump them every 1000 draws. */
      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      brw_update_dirty_count(cache_bits, state->cache);
      if (dirty_count++ % 1000 == 0) {
	 brw_print_dirty_count(mesa_bits, state->mesa);
	 brw_print_dirty_count(brw_bits, state->brw);
	 brw_print_dirty_count(cache_bits, state->cache);
	 fprintf(stderr, "\n");
      }
   }

   /* Keep the dirty bits when falling back so the software path's
    * eventual re-validation sees them; otherwise clear for next draw.
    */
   if (!brw->intel.Fallback)
      memset(state, 0, sizeof(*state));
}