i965: Reconnect the index/vertex setup.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_state_upload.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33
34 #include "brw_context.h"
35 #include "brw_state.h"
36 #include "intel_batchbuffer.h"
37 #include "intel_buffers.h"
38 #include "intel_chipset.h"
39
/* Ordered list of state atoms for original (gen4/gen5-class) hardware.
 *
 * brw_validate_state()/brw_upload_state() below walk this table in
 * order, calling each atom's prepare()/emit() hook when the atom's
 * dirty flags intersect the current dirty state.  Order matters: later
 * atoms may consume state produced by earlier ones (see the inline
 * notes).
 *
 * NOTE(review): the historical comment here said this list initializes
 * brw->state.atoms[] and singled out brw_constant_buffer's dynamic
 * .dirty value; no such copy is visible in this file — the table is
 * used directly.  Confirm against brw_context.h before relying on that
 * claim.
 */
static const struct brw_tracked_state *gen4_atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_vp,
   &brw_cc_unit,

   &brw_vs_surfaces,		/* must do before unit */
   &brw_wm_constant_surface,	/* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,		/* must do before samplers and unit */
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,		/* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   &brw_constant_buffer
};
104
/* Ordered list of state atoms for Sandybridge (gen6) hardware.  The
 * #if 0 regions are pre-gen6 atoms not yet ported to the new state
 * layout; they are kept in place to preserve the intended ordering.
 *
 * NOTE(review): unlike gen4_atoms above, this array has external
 * linkage (no `static`).  It may be referenced from another file —
 * verify before tightening the linkage for consistency.
 */
const struct brw_tracked_state *gen6_atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
#if 0
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,

   &brw_cc_vp,

#endif
   &gen6_urb,
   &gen6_blend_state,		/* must do before cc unit */
   &gen6_color_calc_state,	/* must do before cc unit */
   &gen6_depth_stencil_state,	/* must do before cc unit */
   &gen6_cc_state_pointers,

   &brw_vs_surfaces,		/* must do before unit */
   &brw_wm_constant_surface,	/* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,		/* must do before samplers and unit */

   &gen6_vs_state,
   &gen6_gs_state,
   &gen6_clip_state,
#if 0
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,
#endif

   &brw_depthbuffer,

#if 0
   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
#endif

   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,
};
172
/**
 * One-time per-context state-tracker initialization: sets up the
 * program/unit caches used by the state atoms.
 */
void brw_init_state( struct brw_context *brw )
{
   brw_init_caches(brw);
}
177
178
/**
 * Context teardown counterpart of brw_init_state(): releases the state
 * caches and the cached-batch state.
 */
void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
   brw_destroy_batch_cache(brw);
}
184
185 /***********************************************************************
186 */
187
188 static GLboolean check_state( const struct brw_state_flags *a,
189 const struct brw_state_flags *b )
190 {
191 return ((a->mesa & b->mesa) ||
192 (a->brw & b->brw) ||
193 (a->cache & b->cache));
194 }
195
196 static void accumulate_state( struct brw_state_flags *a,
197 const struct brw_state_flags *b )
198 {
199 a->mesa |= b->mesa;
200 a->brw |= b->brw;
201 a->cache |= b->cache;
202 }
203
204
205 static void xor_states( struct brw_state_flags *result,
206 const struct brw_state_flags *a,
207 const struct brw_state_flags *b )
208 {
209 result->mesa = a->mesa ^ b->mesa;
210 result->brw = a->brw ^ b->brw;
211 result->cache = a->cache ^ b->cache;
212 }
213
/**
 * Drop the references taken on buffer objects validated during the
 * previous state-upload round, and reset the validated-bo list.
 *
 * Each slot is NULLed after unreference so a stale pointer can never
 * be double-unreferenced on a later pass.
 */
void
brw_clear_validated_bos(struct brw_context *brw)
{
   int i;

   /* Clear the last round of validated bos */
   for (i = 0; i < brw->state.validated_bo_count; i++) {
      dri_bo_unreference(brw->state.validated_bos[i]);
      brw->state.validated_bos[i] = NULL;
   }
   brw->state.validated_bo_count = 0;
}
226
/**
 * Maps a single dirty-flag bit to its printable name, plus a running
 * count of how many uploads had that bit set (DEBUG_STATE statistics,
 * see brw_update_dirty_count/brw_print_dirty_count below).
 */
struct dirty_bit_map {
   uint32_t bit;
   const char *name;	/* always a string literal from DEFINE_BIT; never written */
   uint32_t count;
};

/* Expands to an initializer that names the bit after its own token. */
#define DEFINE_BIT(name) {name, #name, 0}
/* Name table for core-Mesa _NEW_* dirty bits (state->mesa), terminated
 * by a zero entry.  Used only for DEBUG_STATE statistics.
 */
static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR_MATRIX),
   DEFINE_BIT(_NEW_ACCUM),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_PACKUNPACK),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   {0, 0, 0}
};
267
/* Name table for driver-internal BRW_NEW_* dirty bits (state->brw),
 * terminated by a zero entry.  Used only for DEBUG_STATE statistics.
 */
static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_WM_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_DEPTH_BUFFER),
   {0, 0, 0}
};
286
/* Name table for state-cache CACHE_NEW_* dirty bits (state->cache),
 * terminated by a zero entry.  Used only for DEBUG_STATE statistics.
 */
static struct dirty_bit_map cache_bits[] = {
   DEFINE_BIT(CACHE_NEW_BLEND_STATE),
   DEFINE_BIT(CACHE_NEW_CC_VP),
   DEFINE_BIT(CACHE_NEW_CC_UNIT),
   DEFINE_BIT(CACHE_NEW_WM_PROG),
   DEFINE_BIT(CACHE_NEW_SAMPLER_DEFAULT_COLOR),
   DEFINE_BIT(CACHE_NEW_SAMPLER),
   DEFINE_BIT(CACHE_NEW_WM_UNIT),
   DEFINE_BIT(CACHE_NEW_SF_PROG),
   DEFINE_BIT(CACHE_NEW_SF_VP),
   DEFINE_BIT(CACHE_NEW_SF_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_PROG),
   DEFINE_BIT(CACHE_NEW_GS_UNIT),
   DEFINE_BIT(CACHE_NEW_GS_PROG),
   DEFINE_BIT(CACHE_NEW_CLIP_VP),
   DEFINE_BIT(CACHE_NEW_CLIP_UNIT),
   DEFINE_BIT(CACHE_NEW_CLIP_PROG),
   DEFINE_BIT(CACHE_NEW_SURFACE),
   DEFINE_BIT(CACHE_NEW_SURF_BIND),
   {0, 0, 0}
};
309
310
311 static void
312 brw_update_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
313 {
314 int i;
315
316 for (i = 0; i < 32; i++) {
317 if (bit_map[i].bit == 0)
318 return;
319
320 if (bit_map[i].bit & bits)
321 bit_map[i].count++;
322 }
323 }
324
325 static void
326 brw_print_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
327 {
328 int i;
329
330 for (i = 0; i < 32; i++) {
331 if (bit_map[i].bit == 0)
332 return;
333
334 fprintf(stderr, "0x%08x: %12d (%s)\n",
335 bit_map[i].bit, bit_map[i].count, bit_map[i].name);
336 }
337 }
338
339 /***********************************************************************
340 * Emit all state:
341 */
/***********************************************************************
 * Emit all state:
 */
/**
 * Prepare stage of state upload: fold incoming GL dirty flags into the
 * driver's dirty-state tracker, pick the per-generation atom list, and
 * run each matching atom's prepare() hook (which may validate buffer
 * objects but must not emit batch commands yet).
 *
 * Returns early — emitting nothing — when no dirty bits are set.
 * Side effects: clears intel->NewGLState, may set intel->Fallback,
 * resets the validated-bo list, and updates brw->fragment_program /
 * brw->vertex_program to track the current GL programs.
 */
void brw_validate_state( struct brw_context *brw )
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   brw_clear_validated_bos(brw);

   /* Pull in whatever core Mesa has flagged since the last upload. */
   state->mesa |= brw->intel.NewGLState;
   brw->intel.NewGLState = 0;

   /* The batchbuffer itself is always part of the validation set. */
   brw_add_validated_bo(brw, intel->batch->buf);

   /* Select the atom list for this hardware generation. */
   if (IS_GEN6(intel->intelScreen->deviceID)) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   /* Debug mode: force every atom to re-run each frame. */
   if (brw->emit_state_always) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   if (brw->fragment_program != ctx->FragmentProgram._Current) {
      brw->fragment_program = ctx->FragmentProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
   }

   if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
   }

   /* Nothing dirty at all — nothing to prepare. */
   if (state->mesa == 0 &&
       state->cache == 0 &&
       state->brw == 0)
      return;

   if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
      brw_clear_batch_cache(brw);

   brw->intel.Fallback = GL_FALSE; /* boolean, not bitfield */

   /* do prepare stage for all atoms */
   for (i = 0; i < num_atoms; i++) {
      const struct brw_tracked_state *atom = atoms[i];

      /* An atom may trigger a software fallback; stop preparing then. */
      if (brw->intel.Fallback)
	 break;

      if (check_state(state, &atom->dirty)) {
	 if (atom->prepare) {
	    atom->prepare(brw);
	 }
      }
   }

   intel_check_front_buffer_rendering(intel);

   /* Make sure that the textures which are referenced by the current
    * brw fragment program are actually present/valid.
    * If this fails, we can experience GPU lock-ups.
    */
   {
      const struct brw_fragment_program *fp;
      fp = brw_fragment_program_const(brw->fragment_program);
      if (fp) {
	 assert((fp->tex_units_used & ctx->Texture._EnabledUnits)
		== fp->tex_units_used);
      }
   }
}
421
422
/**
 * Emit stage of state upload: run each dirty atom's emit() hook to
 * write state packets into the batchbuffer, then clear the dirty flags.
 *
 * Must be called after brw_validate_state() (which runs the prepare
 * hooks and selects the dirty set).  Under INTEL_DEBUG a
 * sanity-checking path additionally asserts that no atom flags state
 * that an already-examined (earlier) atom depended on — i.e. that the
 * atom list is correctly ordered.
 */
void brw_upload_state(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   int i;
   static int dirty_count = 0;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   /* Same generation selection as brw_validate_state(). */
   if (IS_GEN6(intel->intelScreen->deviceID)) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   brw_clear_validated_bos(brw);

   if (INTEL_DEBUG) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];
	 struct brw_state_flags generated;

	 /* Every atom must watch at least one kind of dirty flag. */
	 assert(atom->dirty.mesa ||
		atom->dirty.brw ||
		atom->dirty.cache);

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }

	 accumulate_state(&examined, &atom->dirty);

	 /* generated = (prev ^ state)
	  * if (examined & generated)
	  *     fail;
	  */
	 xor_states(&generated, &prev, state);
	 assert(!check_state(&examined, &generated));
	 prev = *state;
      }
   }
   else {
      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }
      }
   }

   if (INTEL_DEBUG & DEBUG_STATE) {
      /* Accumulate per-bit statistics; dump them every 1000 uploads. */
      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      brw_update_dirty_count(cache_bits, state->cache);
      if (dirty_count++ % 1000 == 0) {
	 brw_print_dirty_count(mesa_bits, state->mesa);
	 brw_print_dirty_count(brw_bits, state->brw);
	 brw_print_dirty_count(cache_bits, state->cache);
	 fprintf(stderr, "\n");
      }
   }

   /* Keep the dirty flags set on fallback so the state is re-emitted
    * once we return to the hardware path.
    */
   if (!brw->intel.Fallback)
      memset(state, 0, sizeof(*state));
}