i965: Add untested passthrough GS setup.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_state_upload.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33
34 #include "brw_context.h"
35 #include "brw_state.h"
36 #include "intel_batchbuffer.h"
37 #include "intel_buffers.h"
38 #include "intel_chipset.h"
39
40 /* This is used to initialize brw->state.atoms[]. We could use this
41 * list directly except for a single atom, brw_constant_buffer, which
42 * has a .dirty value which changes according to the parameters of the
43 * current fragment and vertex programs, and so cannot be a static
44 * value.
45 */
/* Gen4/Gen5 state atom table, walked in order by brw_validate_state()
 * (prepare hooks) and brw_upload_state() (emit hooks).  Ordering is
 * significant: program atoms run first so URB/CURBE sizing atoms can
 * depend on their results; dependency constraints are noted inline.
 */
static const struct brw_tracked_state *gen4_atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_vp,
   &brw_cc_unit,

   &brw_vs_surfaces,		/* must do before unit */
   &brw_wm_constant_surface,	/* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,		/* must do before samplers and unit */
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,		/* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   &brw_constant_buffer
};
104
/* Sandybridge (Gen6) state atom table.  Most atoms are still disabled
 * with #if 0 while the Gen6 backend is brought up; only the CC state,
 * surface, and VS/GS unit atoms are live here.
 *
 * NOTE(review): unlike gen4_atoms this array is not declared static --
 * looks unintentional; confirm no other file references it via extern
 * before adding static.
 */
const struct brw_tracked_state *gen6_atoms[] =
{
   &brw_check_fallback,

#if 0
   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_vp,

#endif
   &gen6_blend_state,		/* must do before cc unit */
   &gen6_color_calc_state,	/* must do before cc unit */
   &gen6_depth_stencil_state,	/* must do before cc unit */
   &gen6_cc_state_pointers,

   &brw_vs_surfaces,		/* must do before unit */
   &brw_wm_constant_surface,	/* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,		/* must do before samplers and unit */

   &gen6_vs_state,
   &gen6_gs_state,
#if 0
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_clip_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,
#endif

   &brw_depthbuffer,

#if 0
   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   &brw_constant_buffer
#endif
};
174
/* One-time initialization of the state module for a new context. */
void
brw_init_state(struct brw_context *brw)
{
   brw_init_caches(brw);
}
179
180
/* Release all state-module storage when the context goes away. */
void
brw_destroy_state(struct brw_context *brw)
{
   brw_destroy_caches(brw);
   brw_destroy_batch_cache(brw);
}
186
187 /***********************************************************************
188 */
189
190 static GLboolean check_state( const struct brw_state_flags *a,
191 const struct brw_state_flags *b )
192 {
193 return ((a->mesa & b->mesa) ||
194 (a->brw & b->brw) ||
195 (a->cache & b->cache));
196 }
197
198 static void accumulate_state( struct brw_state_flags *a,
199 const struct brw_state_flags *b )
200 {
201 a->mesa |= b->mesa;
202 a->brw |= b->brw;
203 a->cache |= b->cache;
204 }
205
206
207 static void xor_states( struct brw_state_flags *result,
208 const struct brw_state_flags *a,
209 const struct brw_state_flags *b )
210 {
211 result->mesa = a->mesa ^ b->mesa;
212 result->brw = a->brw ^ b->brw;
213 result->cache = a->cache ^ b->cache;
214 }
215
216 void
217 brw_clear_validated_bos(struct brw_context *brw)
218 {
219 int i;
220
221 /* Clear the last round of validated bos */
222 for (i = 0; i < brw->state.validated_bo_count; i++) {
223 dri_bo_unreference(brw->state.validated_bos[i]);
224 brw->state.validated_bos[i] = NULL;
225 }
226 brw->state.validated_bo_count = 0;
227 }
228
/* Debug bookkeeping: maps one dirty-flag bit to its symbolic name and a
 * running count of how many state uploads had that bit set (maintained
 * by brw_update_dirty_count(), dumped by brw_print_dirty_count()).
 */
struct dirty_bit_map {
   uint32_t bit;		/* the single flag bit this entry tracks */
   const char *name;		/* stringified macro name for debug output;
				 * const: it always points at a string literal,
				 * which must never be written through */
   uint32_t count;		/* cumulative number of times seen set */
};

/* Expand to a dirty_bit_map initializer, stringifying the bit's name. */
#define DEFINE_BIT(name) {name, #name, 0}
236
/* Per-bit debug counters for core Mesa _NEW_* dirty flags.
 * The {0, 0, 0} entry is a required sentinel terminating the table.
 */
static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR_MATRIX),
   DEFINE_BIT(_NEW_ACCUM),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_PACKUNPACK),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   {0, 0, 0}
};
269
/* Per-bit debug counters for driver-internal BRW_NEW_* dirty flags.
 * The {0, 0, 0} entry is a required sentinel terminating the table.
 */
static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_WM_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_DEPTH_BUFFER),
   {0, 0, 0}
};
288
/* Per-bit debug counters for CACHE_NEW_* (state-cache) dirty flags.
 * The {0, 0, 0} entry is a required sentinel terminating the table.
 */
static struct dirty_bit_map cache_bits[] = {
   DEFINE_BIT(CACHE_NEW_BLEND_STATE),
   DEFINE_BIT(CACHE_NEW_CC_VP),
   DEFINE_BIT(CACHE_NEW_CC_UNIT),
   DEFINE_BIT(CACHE_NEW_WM_PROG),
   DEFINE_BIT(CACHE_NEW_SAMPLER_DEFAULT_COLOR),
   DEFINE_BIT(CACHE_NEW_SAMPLER),
   DEFINE_BIT(CACHE_NEW_WM_UNIT),
   DEFINE_BIT(CACHE_NEW_SF_PROG),
   DEFINE_BIT(CACHE_NEW_SF_VP),
   DEFINE_BIT(CACHE_NEW_SF_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_PROG),
   DEFINE_BIT(CACHE_NEW_GS_UNIT),
   DEFINE_BIT(CACHE_NEW_GS_PROG),
   DEFINE_BIT(CACHE_NEW_CLIP_VP),
   DEFINE_BIT(CACHE_NEW_CLIP_UNIT),
   DEFINE_BIT(CACHE_NEW_CLIP_PROG),
   DEFINE_BIT(CACHE_NEW_SURFACE),
   DEFINE_BIT(CACHE_NEW_SURF_BIND),
   {0, 0, 0}
};
311
312
313 static void
314 brw_update_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
315 {
316 int i;
317
318 for (i = 0; i < 32; i++) {
319 if (bit_map[i].bit == 0)
320 return;
321
322 if (bit_map[i].bit & bits)
323 bit_map[i].count++;
324 }
325 }
326
327 static void
328 brw_print_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
329 {
330 int i;
331
332 for (i = 0; i < 32; i++) {
333 if (bit_map[i].bit == 0)
334 return;
335
336 fprintf(stderr, "0x%08x: %12d (%s)\n",
337 bit_map[i].bit, bit_map[i].count, bit_map[i].name);
338 }
339 }
340
341 /***********************************************************************
342 * Emit all state:
343 */
/***********************************************************************
 * Stage one of state upload: fold new GL state into the dirty flags,
 * pick the per-generation atom list, and run each dirty atom's
 * prepare() hook.  The prepare stage sizes state and registers the
 * buffer objects to validate; nothing is emitted to the batch here
 * (that happens in brw_upload_state()).
 */
void brw_validate_state( struct brw_context *brw )
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   brw_clear_validated_bos(brw);

   /* Pull in the GL state changes core Mesa has flagged since last time. */
   state->mesa |= brw->intel.NewGLState;
   brw->intel.NewGLState = 0;

   brw_add_validated_bo(brw, intel->batch->buf);

   /* Select the atom list for this hardware generation. */
   if (IS_GEN6(intel->intelScreen->deviceID)) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   /* Debug/safety mode: treat everything as dirty every time. */
   if (brw->emit_state_always) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   if (brw->fragment_program != ctx->FragmentProgram._Current) {
      brw->fragment_program = ctx->FragmentProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
   }

   if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
   }

   /* Nothing dirty at all: skip the atom walk entirely. */
   if (state->mesa == 0 &&
       state->cache == 0 &&
       state->brw == 0)
      return;

   if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
      brw_clear_batch_cache(brw);

   brw->intel.Fallback = GL_FALSE; /* boolean, not bitfield */

   /* do prepare stage for all atoms */
   for (i = 0; i < num_atoms; i++) {
      const struct brw_tracked_state *atom = atoms[i];

      /* A prepare hook may have requested a software fallback; stop. */
      if (brw->intel.Fallback)
	 break;

      if (check_state(state, &atom->dirty)) {
	 if (atom->prepare) {
	    atom->prepare(brw);
	 }
      }
   }

   intel_check_front_buffer_rendering(intel);

   /* Make sure that the textures which are referenced by the current
    * brw fragment program are actually present/valid.
    * If this fails, we can experience GPU lock-ups.
    */
   {
      const struct brw_fragment_program *fp;
      fp = brw_fragment_program_const(brw->fragment_program);
      if (fp) {
	 assert((fp->tex_units_used & ctx->Texture._EnabledUnits)
		== fp->tex_units_used);
      }
   }
}
423
424
/* Stage two of state upload: run the emit() hook of every atom whose
 * dirty flags intersect the accumulated dirty state, writing commands
 * and indirect state into the batchbuffer.  brw_validate_state() must
 * have run first so the prepare stage has sized state and validated
 * the referenced buffer objects.  On success all dirty flags are
 * cleared; on fallback they are left set so the next upload redoes
 * everything.
 */
void brw_upload_state(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   int i;
   static int dirty_count = 0;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   /* Select the atom list for this hardware generation (must match the
    * list brw_validate_state() prepared).
    */
   if (IS_GEN6(intel->intelScreen->deviceID)) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   brw_clear_validated_bos(brw);

   if (INTEL_DEBUG) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];
	 struct brw_state_flags generated;

	 /* Every atom must watch at least one flag, or it can never run. */
	 assert(atom->dirty.mesa ||
		atom->dirty.brw ||
		atom->dirty.cache);

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }

	 accumulate_state(&examined, &atom->dirty);

	 /* generated = (prev ^ state)
	  * if (examined & generated)
	  *     fail;
	  */
	 xor_states(&generated, &prev, state);
	 assert(!check_state(&examined, &generated));
	 prev = *state;
      }
   }
   else {
      /* Fast path: just emit every atom whose watched flags are dirty. */
      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }
      }
   }

   if (INTEL_DEBUG & DEBUG_STATE) {
      /* Accumulate per-bit dirty statistics and dump them to stderr
       * once every 1000 uploads.
       */
      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      brw_update_dirty_count(cache_bits, state->cache);
      if (dirty_count++ % 1000 == 0) {
	 brw_print_dirty_count(mesa_bits, state->mesa);
	 brw_print_dirty_count(brw_bits, state->brw);
	 brw_print_dirty_count(cache_bits, state->cache);
	 fprintf(stderr, "\n");
      }
   }

   /* Keep the flags dirty on fallback so everything is re-emitted when
    * we come back from software rendering.
    */
   if (!brw->intel.Fallback)
      memset(state, 0, sizeof(*state));
}