i965: Split constant buffer setup from its surface state/binding state.
[mesa.git] / src / mesa / drivers / dri / i965 / brw_state_upload.c
1 /*
2 Copyright (C) Intel Corp. 2006. All Rights Reserved.
3 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
4 develop this 3D driver.
5
6 Permission is hereby granted, free of charge, to any person obtaining
7 a copy of this software and associated documentation files (the
8 "Software"), to deal in the Software without restriction, including
9 without limitation the rights to use, copy, modify, merge, publish,
10 distribute, sublicense, and/or sell copies of the Software, and to
11 permit persons to whom the Software is furnished to do so, subject to
12 the following conditions:
13
14 The above copyright notice and this permission notice (including the
15 next paragraph) shall be included in all copies or substantial
16 portions of the Software.
17
18 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
19 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
20 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
21 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
22 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
23 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
24 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25
26 **********************************************************************/
27 /*
28 * Authors:
29 * Keith Whitwell <keith@tungstengraphics.com>
30 */
31
32
33
34 #include "brw_context.h"
35 #include "brw_state.h"
36 #include "intel_batchbuffer.h"
37 #include "intel_buffers.h"
38 #include "intel_chipset.h"
39
/* This is used to initialize brw->state.atoms[].  We could use this
 * list directly except for a single atom, brw_constant_buffer, which
 * has a .dirty value which changes according to the parameters of the
 * current fragment and vertex programs, and so cannot be a static
 * value.
 *
 * State atoms for gen4/gen5 hardware.  Order matters: both
 * brw_validate_state() and brw_upload_state() walk this list front to
 * back, so atoms that produce state consumed by a later atom must
 * appear first (see the inline ordering comments below).
 */
static const struct brw_tracked_state *gen4_atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_unit,

   &brw_vs_constants, /* Before vs_surfaces and constant_buffer */
   &brw_wm_constants, /* Before wm_surfaces and constant_buffer */

   &brw_vs_surfaces,		/* must do before unit */
   &brw_wm_constant_surface,	/* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,		/* must do before samplers and unit */
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,		/* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   /* Last: its .dirty value depends on the current programs (see the
    * comment at the top of this list).
    */
   &brw_constant_buffer
};
106
/* State atoms for gen6 (Sandybridge) hardware.  Same walk order
 * semantics as gen4_atoms: validated/emitted front to back.
 *
 * NOTE(review): unlike gen4_atoms this array is not declared static —
 * presumably an oversight rather than a deliberate export; confirm no
 * other translation unit references it before adding static.
 */
const struct brw_tracked_state *gen6_atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_wm_prog,

   &gen6_clip_vp,
   &gen6_sf_vp,
   &gen6_cc_vp,

   /* Command packets: */
   &brw_invarient_state,

   &gen6_viewport_state,	/* must do after *_vp stages */

   &gen6_urb,
   &gen6_blend_state,		/* must do before cc unit */
   &gen6_color_calc_state,	/* must do before cc unit */
   &gen6_depth_stencil_state,	/* must do before cc unit */
   &gen6_cc_state_pointers,

   &brw_vs_surfaces,		/* must do before unit */
   &brw_wm_constant_surface,	/* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,		/* must do before samplers and unit */

   &brw_wm_samplers,
   &gen6_sampler_state,

   &gen6_vs_state,
   &gen6_gs_state,
   &gen6_clip_state,
   &gen6_sf_state,
   &gen6_wm_state,

   &gen6_scissor_state,

   &brw_state_base_address,

   &gen6_binding_table_pointers,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_drawing_rect,

   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,
};
164
/* One-time state-module initialization for a new context: sets up the
 * program/state caches.  Paired with brw_destroy_state().
 */
void brw_init_state( struct brw_context *brw )
{
   brw_init_caches(brw);
}
169
170
/* Tear down state-module resources at context destruction: the
 * program/state caches, then the batch cache.  Counterpart of
 * brw_init_state().
 */
void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
   brw_destroy_batch_cache(brw);
}
176
177 /***********************************************************************
178 */
179
180 static GLboolean check_state( const struct brw_state_flags *a,
181 const struct brw_state_flags *b )
182 {
183 return ((a->mesa & b->mesa) ||
184 (a->brw & b->brw) ||
185 (a->cache & b->cache));
186 }
187
188 static void accumulate_state( struct brw_state_flags *a,
189 const struct brw_state_flags *b )
190 {
191 a->mesa |= b->mesa;
192 a->brw |= b->brw;
193 a->cache |= b->cache;
194 }
195
196
197 static void xor_states( struct brw_state_flags *result,
198 const struct brw_state_flags *a,
199 const struct brw_state_flags *b )
200 {
201 result->mesa = a->mesa ^ b->mesa;
202 result->brw = a->brw ^ b->brw;
203 result->cache = a->cache ^ b->cache;
204 }
205
206 void
207 brw_clear_validated_bos(struct brw_context *brw)
208 {
209 int i;
210
211 /* Clear the last round of validated bos */
212 for (i = 0; i < brw->state.validated_bo_count; i++) {
213 drm_intel_bo_unreference(brw->state.validated_bos[i]);
214 brw->state.validated_bos[i] = NULL;
215 }
216 brw->state.validated_bo_count = 0;
217 }
218
/* Bookkeeping for the INTEL_DEBUG=state statistics gathered in
 * brw_upload_state(): one entry per dirty flag, counting how many
 * uploads saw that flag set.
 */
struct dirty_bit_map {
   uint32_t bit;		/* the flag value itself */
   const char *name;		/* stringified flag name, for printing;
				 * const: always points at a string literal
				 * produced by DEFINE_BIT below. */
   uint32_t count;		/* number of uploads with the bit set */
};

/* Build one initializer entry whose .name is the stringified flag. */
#define DEFINE_BIT(name) {name, #name, 0}
226
/* Name table for core Mesa's _NEW_* dirty bits, used only for the
 * INTEL_DEBUG=state statistics.  Terminated by a zero entry; must stay
 * within the 32-entry scan cap of brw_update_dirty_count() /
 * brw_print_dirty_count().
 */
static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR_MATRIX),
   DEFINE_BIT(_NEW_ACCUM),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_PACKUNPACK),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   {0, 0, 0}
};
259
/* Name table for the driver-internal BRW_NEW_* dirty bits, used only
 * for the INTEL_DEBUG=state statistics.  Zero-terminated.
 */
static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_WM_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_DEPTH_BUFFER),
   {0, 0, 0}
};
278
/* Name table for the CACHE_NEW_* dirty bits (state-cache results),
 * used only for the INTEL_DEBUG=state statistics.  Zero-terminated.
 */
static struct dirty_bit_map cache_bits[] = {
   DEFINE_BIT(CACHE_NEW_BLEND_STATE),
   DEFINE_BIT(CACHE_NEW_CC_VP),
   DEFINE_BIT(CACHE_NEW_CC_UNIT),
   DEFINE_BIT(CACHE_NEW_WM_PROG),
   DEFINE_BIT(CACHE_NEW_SAMPLER_DEFAULT_COLOR),
   DEFINE_BIT(CACHE_NEW_SAMPLER),
   DEFINE_BIT(CACHE_NEW_WM_UNIT),
   DEFINE_BIT(CACHE_NEW_SF_PROG),
   DEFINE_BIT(CACHE_NEW_SF_VP),
   DEFINE_BIT(CACHE_NEW_SF_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_PROG),
   DEFINE_BIT(CACHE_NEW_GS_UNIT),
   DEFINE_BIT(CACHE_NEW_GS_PROG),
   DEFINE_BIT(CACHE_NEW_CLIP_VP),
   DEFINE_BIT(CACHE_NEW_CLIP_UNIT),
   DEFINE_BIT(CACHE_NEW_CLIP_PROG),
   DEFINE_BIT(CACHE_NEW_SURFACE),
   DEFINE_BIT(CACHE_NEW_SURF_BIND),
   {0, 0, 0}
};
301
302
303 static void
304 brw_update_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
305 {
306 int i;
307
308 for (i = 0; i < 32; i++) {
309 if (bit_map[i].bit == 0)
310 return;
311
312 if (bit_map[i].bit & bits)
313 bit_map[i].count++;
314 }
315 }
316
317 static void
318 brw_print_dirty_count(struct dirty_bit_map *bit_map, int32_t bits)
319 {
320 int i;
321
322 for (i = 0; i < 32; i++) {
323 if (bit_map[i].bit == 0)
324 return;
325
326 fprintf(stderr, "0x%08x: %12d (%s)\n",
327 bit_map[i].bit, bit_map[i].count, bit_map[i].name);
328 }
329 }
330
/***********************************************************************
 * Validation: run the "prepare" stage of every dirty state atom.
 *
 * Folds core Mesa's new-state bits and program changes into the dirty
 * flags, picks the atom list for the current hardware generation, and
 * calls each dirty atom's ->prepare() hook.  Must precede
 * brw_upload_state(), which runs the ->emit() hooks.
 */
void brw_validate_state( struct brw_context *brw )
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   brw_clear_validated_bos(brw);

   /* Fold core Mesa's accumulated new-state bits into our tracking. */
   state->mesa |= brw->intel.NewGLState;
   brw->intel.NewGLState = 0;

   brw_add_validated_bo(brw, intel->batch->buf);

   /* Pick the atom list for this hardware generation. */
   if (IS_GEN6(intel->intelScreen->deviceID)) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   /* Debug mode: force every atom to run on every draw. */
   if (brw->emit_state_always) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   /* Detect program changes ourselves, since they are tracked with
    * driver-specific BRW_NEW_* bits rather than core Mesa flags.
    */
   if (brw->fragment_program != ctx->FragmentProgram._Current) {
      brw->fragment_program = ctx->FragmentProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
   }

   if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
   }

   /* Nothing dirty at all: skip the prepare walk entirely. */
   if (state->mesa == 0 &&
       state->cache == 0 &&
       state->brw == 0)
      return;

   if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
      brw_clear_batch_cache(brw);

   brw->intel.Fallback = GL_FALSE; /* boolean, not bitfield */

   /* do prepare stage for all atoms */
   for (i = 0; i < num_atoms; i++) {
      const struct brw_tracked_state *atom = atoms[i];

      /* An earlier atom may have requested software fallback. */
      if (brw->intel.Fallback)
	 break;

      if (check_state(state, &atom->dirty)) {
	 if (atom->prepare) {
	    atom->prepare(brw);
	 }
      }
   }

   intel_check_front_buffer_rendering(intel);

   /* Make sure that the textures which are referenced by the current
    * brw fragment program are actually present/valid.
    * If this fails, we can experience GPU lock-ups.
    */
   {
      const struct brw_fragment_program *fp;
      fp = brw_fragment_program_const(brw->fragment_program);
      if (fp) {
	 assert((fp->tex_units_used & ctx->Texture._EnabledUnits)
		== fp->tex_units_used);
      }
   }
}
413
414
/* Upload: run the "emit" stage of every dirty state atom.
 *
 * Walks the generation-specific atom list and calls ->emit() for each
 * atom whose dirty flags intersect the current dirty state.  Under
 * INTEL_DEBUG a checking variant of the loop asserts that no atom
 * raises flags that an earlier atom already consumed (an atom-ordering
 * bug).  Dirty flags are cleared at the end unless a software fallback
 * was triggered, so state is re-emitted on the next attempt.
 *
 * Callers run brw_validate_state() first so ->prepare() hooks have
 * validated the needed buffers.
 */
void brw_upload_state(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   int i;
   static int dirty_count = 0;	/* throttles the periodic stats dump */
   const struct brw_tracked_state **atoms;
   int num_atoms;

   /* Pick the atom list for this hardware generation. */
   if (IS_GEN6(intel->intelScreen->deviceID)) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   brw_clear_validated_bos(brw);

   if (INTEL_DEBUG) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];
	 struct brw_state_flags generated;

	 /* Every atom must watch at least one dirty flag. */
	 assert(atom->dirty.mesa ||
		atom->dirty.brw ||
		atom->dirty.cache);

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }

	 accumulate_state(&examined, &atom->dirty);

	 /* generated = (prev ^ state)
	  * if (examined & generated)
	  *     fail;
	  */
	 xor_states(&generated, &prev, state);
	 assert(!check_state(&examined, &generated));
	 prev = *state;
      }
   }
   else {
      /* Fast path: no ordering checks, just emit dirty atoms. */
      for (i = 0; i < num_atoms; i++) {
	 const struct brw_tracked_state *atom = atoms[i];

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }
      }
   }

   /* INTEL_DEBUG=state statistics: count set bits each upload and
    * print the totals every 1000 uploads.
    */
   if (INTEL_DEBUG & DEBUG_STATE) {
      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      brw_update_dirty_count(cache_bits, state->cache);
      if (dirty_count++ % 1000 == 0) {
	 brw_print_dirty_count(mesa_bits, state->mesa);
	 brw_print_dirty_count(brw_bits, state->brw);
	 brw_print_dirty_count(cache_bits, state->cache);
	 fprintf(stderr, "\n");
      }
   }

   /* Keep the flags dirty on fallback so everything is re-emitted. */
   if (!brw->intel.Fallback)
      memset(state, 0, sizeof(*state));
}