intel: Change dri_bo_* to drm_intel_bo* to consistently use new API.

src/mesa/drivers/dri/i965/brw_state_upload.c
/*
 Copyright (C) Intel Corp.  2006.  All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */


#include "brw_context.h"
#include "brw_state.h"
#include "intel_batchbuffer.h"
#include "intel_buffers.h"
#include "intel_chipset.h"

/* This is used to initialize brw->state.atoms[].  We could use this
 * list directly except for a single atom, brw_constant_buffer, whose
 * .dirty value changes with the parameters of the current fragment and
 * vertex programs, and so cannot be a static value.
 */
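
/* Each atom pairs a set of dirty-flag bits with optional prepare()/emit()
 * hooks (struct brw_tracked_state; see brw_context.h).  As an illustrative
 * sketch only, not a verbatim copy of any one atom, a definition in one of
 * the brw_*.c files looks roughly like:
 *
 *    const struct brw_tracked_state brw_drawing_rect = {
 *       .dirty = {
 *          .mesa = _NEW_BUFFERS,
 *          .brw = BRW_NEW_CONTEXT,
 *          .cache = 0
 *       },
 *       .emit = upload_drawing_rect
 *    };
 *
 * brw_validate_state() and brw_upload_state() below call the hooks of every
 * atom whose dirty bits intersect the currently accumulated dirty state.
 */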
static const struct brw_tracked_state *gen4_atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large urb entry
    * sizes need to be and can decide if we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_vp,
   &brw_cc_unit,

   &brw_vs_surfaces,          /* must do before unit */
   &brw_wm_constant_surface,  /* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,          /* must do before samplers and unit */
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,              /* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   &brw_constant_buffer
};

const struct brw_tracked_state *gen6_atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_wm_prog,

   &gen6_clip_vp,
   &gen6_sf_vp,
   &gen6_cc_vp,

   /* Command packets: */
   &brw_invarient_state,

   &gen6_viewport_state,      /* must do after *_vp stages */

   &gen6_urb,
   &gen6_blend_state,         /* must do before cc unit */
   &gen6_color_calc_state,    /* must do before cc unit */
   &gen6_depth_stencil_state, /* must do before cc unit */
   &gen6_cc_state_pointers,

   &brw_vs_surfaces,          /* must do before unit */
   &brw_wm_constant_surface,  /* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,          /* must do before samplers and unit */

   &brw_wm_samplers,
   &gen6_sampler_state,

   &gen6_vs_state,
   &gen6_gs_state,
   &gen6_clip_state,
   &gen6_sf_state,
   &gen6_wm_state,

   &gen6_scissor_state,

   &brw_state_base_address,

   &gen6_binding_table_pointers,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_drawing_rect,

   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,
};

void brw_init_state( struct brw_context *brw )
{
   brw_init_caches(brw);
}


void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
   brw_destroy_batch_cache(brw);
}

/***********************************************************************
 */

static GLboolean check_state( const struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   return ((a->mesa & b->mesa) ||
           (a->brw & b->brw) ||
           (a->cache & b->cache));
}

static void accumulate_state( struct brw_state_flags *a,
                              const struct brw_state_flags *b )
{
   a->mesa |= b->mesa;
   a->brw |= b->brw;
   a->cache |= b->cache;
}


static void xor_states( struct brw_state_flags *result,
                        const struct brw_state_flags *a,
                        const struct brw_state_flags *b )
{
   result->mesa = a->mesa ^ b->mesa;
   result->brw = a->brw ^ b->brw;
   result->cache = a->cache ^ b->cache;
}
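
/* These three helpers drive the INTEL_DEBUG sanity check in
 * brw_upload_state() below: xor_states() computes "generated", the set of
 * bits that changed across one atom's emit, and asserting
 * !check_state(&examined, &generated) catches an atom dirtying a bit that
 * an earlier atom already consumed, i.e. an ordering bug in the atom
 * lists above.
 */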

void
brw_clear_validated_bos(struct brw_context *brw)
{
   int i;

   /* Clear the last round of validated bos */
   for (i = 0; i < brw->state.validated_bo_count; i++) {
      drm_intel_bo_unreference(brw->state.validated_bos[i]);
      brw->state.validated_bos[i] = NULL;
   }
   brw->state.validated_bo_count = 0;
}
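
/* The counterpart, brw_add_validated_bo() (a static inline in brw_state.h),
 * takes a reference on each bo as it is recorded, which the unreference
 * above balances.  Roughly (a sketch, not the verbatim definition):
 *
 *    drm_intel_bo_reference(bo);
 *    brw->state.validated_bos[brw->state.validated_bo_count++] = bo;
 */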

struct dirty_bit_map {
   uint32_t bit;
   const char *name;   /* points at a string literal from DEFINE_BIT() */
   uint32_t count;
};

#define DEFINE_BIT(name) {name, #name, 0}
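/* e.g. DEFINE_BIT(_NEW_COLOR) expands to { _NEW_COLOR, "_NEW_COLOR", 0 },
 * pairing each dirty bit with a printable name and a zeroed hit counter.
 */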

static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR_MATRIX),
   DEFINE_BIT(_NEW_ACCUM),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_PACKUNPACK),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   {0, 0, 0}
};

static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_WM_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_DEPTH_BUFFER),
   {0, 0, 0}
};

static struct dirty_bit_map cache_bits[] = {
   DEFINE_BIT(CACHE_NEW_BLEND_STATE),
   DEFINE_BIT(CACHE_NEW_CC_VP),
   DEFINE_BIT(CACHE_NEW_CC_UNIT),
   DEFINE_BIT(CACHE_NEW_WM_PROG),
   DEFINE_BIT(CACHE_NEW_SAMPLER_DEFAULT_COLOR),
   DEFINE_BIT(CACHE_NEW_SAMPLER),
   DEFINE_BIT(CACHE_NEW_WM_UNIT),
   DEFINE_BIT(CACHE_NEW_SF_PROG),
   DEFINE_BIT(CACHE_NEW_SF_VP),
   DEFINE_BIT(CACHE_NEW_SF_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_PROG),
   DEFINE_BIT(CACHE_NEW_GS_UNIT),
   DEFINE_BIT(CACHE_NEW_GS_PROG),
   DEFINE_BIT(CACHE_NEW_CLIP_VP),
   DEFINE_BIT(CACHE_NEW_CLIP_UNIT),
   DEFINE_BIT(CACHE_NEW_CLIP_PROG),
   DEFINE_BIT(CACHE_NEW_SURFACE),
   DEFINE_BIT(CACHE_NEW_SURF_BIND),
   {0, 0, 0}
};


static void
brw_update_dirty_count(struct dirty_bit_map *bit_map, uint32_t bits)
{
   int i;

   for (i = 0; i < 32; i++) {
      if (bit_map[i].bit == 0)
         return;

      if (bit_map[i].bit & bits)
         bit_map[i].count++;
   }
}

static void
brw_print_dirty_count(struct dirty_bit_map *bit_map)
{
   int i;

   for (i = 0; i < 32; i++) {
      if (bit_map[i].bit == 0)
         return;

      fprintf(stderr, "0x%08x: %12d (%s)\n",
              bit_map[i].bit, bit_map[i].count, bit_map[i].name);
   }
}
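
/* These counters are maintained by brw_upload_state() below and dumped
 * every 1000 uploads when DEBUG_STATE is set (typically via
 * INTEL_DEBUG=state in the environment, assuming the usual intel driver
 * debug-flag parsing).
 */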

/***********************************************************************
 * Emit all state:
 */
void brw_validate_state( struct brw_context *brw )
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   brw_clear_validated_bos(brw);

   state->mesa |= brw->intel.NewGLState;
   brw->intel.NewGLState = 0;

   brw_add_validated_bo(brw, intel->batch->buf);

   if (IS_GEN6(intel->intelScreen->deviceID)) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   if (brw->emit_state_always) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   if (brw->fragment_program != ctx->FragmentProgram._Current) {
      brw->fragment_program = ctx->FragmentProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
   }

   if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
   }

   if (state->mesa == 0 &&
       state->cache == 0 &&
       state->brw == 0)
      return;

   if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
      brw_clear_batch_cache(brw);

   brw->intel.Fallback = GL_FALSE; /* boolean, not bitfield */

   /* do prepare stage for all atoms */
   for (i = 0; i < num_atoms; i++) {
      const struct brw_tracked_state *atom = atoms[i];

      if (brw->intel.Fallback)
         break;

      if (check_state(state, &atom->dirty)) {
         if (atom->prepare) {
            atom->prepare(brw);
         }
      }
   }

   intel_check_front_buffer_rendering(intel);

   /* Make sure that the textures which are referenced by the current
    * brw fragment program are actually present/valid.
    * If this fails, we can experience GPU lock-ups.
    */
   {
      const struct brw_fragment_program *fp;
      fp = brw_fragment_program_const(brw->fragment_program);
      if (fp) {
         assert((fp->tex_units_used & ctx->Texture._EnabledUnits)
                == fp->tex_units_used);
      }
   }
}

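/* brw_validate_state() above runs each atom's prepare() hook so that all
 * buffer lookups and relocation bookkeeping happen before any commands are
 * written; brw_upload_state() below then runs the emit() hooks, which
 * actually write the state packets into the batchbuffer.
 */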
void brw_upload_state(struct brw_context *brw)
{
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   int i;
   static int dirty_count = 0;
   const struct brw_tracked_state **atoms;
   int num_atoms;

   if (IS_GEN6(intel->intelScreen->deviceID)) {
      atoms = gen6_atoms;
      num_atoms = ARRAY_SIZE(gen6_atoms);
   } else {
      atoms = gen4_atoms;
      num_atoms = ARRAY_SIZE(gen4_atoms);
   }

   brw_clear_validated_bos(brw);

   if (INTEL_DEBUG) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < num_atoms; i++) {
         const struct brw_tracked_state *atom = atoms[i];
         struct brw_state_flags generated;

         assert(atom->dirty.mesa ||
                atom->dirty.brw ||
                atom->dirty.cache);

         if (brw->intel.Fallback)
            break;

         if (check_state(state, &atom->dirty)) {
            if (atom->emit) {
               atom->emit( brw );
            }
         }

         accumulate_state(&examined, &atom->dirty);

         /* generated = (prev ^ state)
          * if (examined & generated)
          *     fail;
          */
         xor_states(&generated, &prev, state);
         assert(!check_state(&examined, &generated));
         prev = *state;
      }
   }
   else {
      for (i = 0; i < num_atoms; i++) {
         const struct brw_tracked_state *atom = atoms[i];

         if (brw->intel.Fallback)
            break;

         if (check_state(state, &atom->dirty)) {
            if (atom->emit) {
               atom->emit( brw );
            }
         }
      }
   }

   if (INTEL_DEBUG & DEBUG_STATE) {
      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      brw_update_dirty_count(cache_bits, state->cache);
      if (dirty_count++ % 1000 == 0) {
         brw_print_dirty_count(mesa_bits);
         brw_print_dirty_count(brw_bits);
         brw_print_dirty_count(cache_bits);
         fprintf(stderr, "\n");
      }
   }

   if (!brw->intel.Fallback)
      memset(state, 0, sizeof(*state));
}