/* src/mesa/drivers/dri/i965/brw_state_upload.c */
/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */

#include "brw_context.h"
#include "brw_state.h"
#include "intel_batchbuffer.h"

/* This is used to initialize brw->state.atoms[].  We could use this
 * list directly except for a single atom, brw_constant_buffer, whose
 * .dirty value changes with the parameters of the current fragment
 * and vertex programs, and so cannot be static.  (A sketch of what
 * an atom definition looks like follows the list below.)
 */
const struct brw_tracked_state *atoms[] =
{
   &brw_check_fallback,

   &brw_wm_input_sizes,
   &brw_vs_prog,
   &brw_gs_prog,
   &brw_clip_prog,
   &brw_sf_prog,
   &brw_wm_prog,

   /* Once all the programs are done, we know how large the urb entry
    * sizes need to be and can decide whether we need to change the urb
    * layout.
    */
   &brw_curbe_offsets,
   &brw_recalculate_urb_fence,

   &brw_cc_vp,
   &brw_cc_unit,

   &brw_vs_surfaces,           /* must do before unit */
   &brw_wm_constant_surface,   /* must do before wm surfaces/bind bo */
   &brw_wm_surfaces,           /* must do before samplers and unit */
   &brw_wm_samplers,

   &brw_wm_unit,
   &brw_sf_vp,
   &brw_sf_unit,
   &brw_vs_unit,               /* always required, enabled or not */
   &brw_clip_unit,
   &brw_gs_unit,

   /* Command packets:
    */
   &brw_invarient_state,
   &brw_state_base_address,

   &brw_binding_table_pointers,
   &brw_blend_constant_color,

   &brw_depthbuffer,

   &brw_polygon_stipple,
   &brw_polygon_stipple_offset,

   &brw_line_stipple,
   &brw_aa_line_parameters,

   &brw_psp_urb_cbs,

   &brw_drawing_rect,
   &brw_indices,
   &brw_index_buffer,
   &brw_vertices,

   &brw_constant_buffer
};
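
/* For reference, each atom pairs a set of dirty flags with optional
 * prepare/emit hooks.  A minimal sketch of a definition, with the field
 * layout assumed from how this file uses struct brw_tracked_state (see
 * brw_state.h for the authoritative declaration); brw_example_atom,
 * example_prepare and example_emit are hypothetical names:
 *
 *    const struct brw_tracked_state brw_example_atom = {
 *       .dirty = {
 *          .mesa  = _NEW_COLOR,       // core Mesa state flags
 *          .brw   = BRW_NEW_CONTEXT,  // driver-internal flags
 *          .cache = 0,                // state-cache flags
 *       },
 *       .prepare = example_prepare,   // run by brw_validate_state()
 *       .emit    = example_emit,      // run by brw_upload_state()
 *    };
 */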


void brw_init_state( struct brw_context *brw )
{
   brw_init_caches(brw);
}


void brw_destroy_state( struct brw_context *brw )
{
   brw_destroy_caches(brw);
   brw_destroy_batch_cache(brw);
}

/***********************************************************************
 */

/* Return GL_TRUE if any dirty flag set in a is also set in b, i.e. if
 * the two flag sets intersect.
 */
static GLboolean check_state( const struct brw_state_flags *a,
			      const struct brw_state_flags *b )
{
   return ((a->mesa & b->mesa) ||
	   (a->brw & b->brw) ||
	   (a->cache & b->cache));
}

/* a |= b for each of the three flag words. */
static void accumulate_state( struct brw_state_flags *a,
			      const struct brw_state_flags *b )
{
   a->mesa |= b->mesa;
   a->brw |= b->brw;
   a->cache |= b->cache;
}

/* result = a ^ b for each of the three flag words, i.e. the set of
 * flags that differ between a and b.
 */
static void xor_states( struct brw_state_flags *result,
			const struct brw_state_flags *a,
			const struct brw_state_flags *b )
{
   result->mesa = a->mesa ^ b->mesa;
   result->brw = a->brw ^ b->brw;
   result->cache = a->cache ^ b->cache;
}
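
/* Together these helpers implement the INTEL_DEBUG ordering check in
 * brw_upload_state(): after each atom runs, xor_states() recovers the
 * flags that atom generated (prev ^ state), accumulate_state() tracks
 * every flag examined so far, and check_state() asserts the two sets
 * never intersect.  If they did, an earlier atom would already have
 * consumed a flag a later atom generated; for instance, brw_wm_prog
 * (which plausibly generates CACHE_NEW_WM_PROG) must precede atoms
 * such as brw_wm_unit that depend on it.
 */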

static void
brw_clear_validated_bos(struct brw_context *brw)
{
   int i;

   /* Clear the last round of validated bos */
   for (i = 0; i < brw->state.validated_bo_count; i++) {
      dri_bo_unreference(brw->state.validated_bos[i]);
      brw->state.validated_bos[i] = NULL;
   }
   brw->state.validated_bo_count = 0;
}
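
/* Note: the counterpart brw_add_validated_bo() (invoked below in
 * brw_validate_state()) presumably takes a reference and appends the
 * bo to brw->state.validated_bos, so prepare hooks can pin every buffer
 * the next batch will reference; this description is inferred from the
 * unreference loop above, not from its definition.
 */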

struct dirty_bit_map {
   uint32_t bit;
   const char *name;	/* points at string literals, hence const */
   uint32_t count;
};

#define DEFINE_BIT(name) {name, #name, 0}
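
/* For example, DEFINE_BIT(_NEW_COLOR) expands to
 * {_NEW_COLOR, "_NEW_COLOR", 0}: the flag value, its stringized name
 * for the debug printout, and a hit count starting at zero.
 */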

static struct dirty_bit_map mesa_bits[] = {
   DEFINE_BIT(_NEW_MODELVIEW),
   DEFINE_BIT(_NEW_PROJECTION),
   DEFINE_BIT(_NEW_TEXTURE_MATRIX),
   DEFINE_BIT(_NEW_COLOR_MATRIX),
   DEFINE_BIT(_NEW_ACCUM),
   DEFINE_BIT(_NEW_COLOR),
   DEFINE_BIT(_NEW_DEPTH),
   DEFINE_BIT(_NEW_EVAL),
   DEFINE_BIT(_NEW_FOG),
   DEFINE_BIT(_NEW_HINT),
   DEFINE_BIT(_NEW_LIGHT),
   DEFINE_BIT(_NEW_LINE),
   DEFINE_BIT(_NEW_PIXEL),
   DEFINE_BIT(_NEW_POINT),
   DEFINE_BIT(_NEW_POLYGON),
   DEFINE_BIT(_NEW_POLYGONSTIPPLE),
   DEFINE_BIT(_NEW_SCISSOR),
   DEFINE_BIT(_NEW_STENCIL),
   DEFINE_BIT(_NEW_TEXTURE),
   DEFINE_BIT(_NEW_TRANSFORM),
   DEFINE_BIT(_NEW_VIEWPORT),
   DEFINE_BIT(_NEW_PACKUNPACK),
   DEFINE_BIT(_NEW_ARRAY),
   DEFINE_BIT(_NEW_RENDERMODE),
   DEFINE_BIT(_NEW_BUFFERS),
   DEFINE_BIT(_NEW_MULTISAMPLE),
   DEFINE_BIT(_NEW_TRACK_MATRIX),
   DEFINE_BIT(_NEW_PROGRAM),
   DEFINE_BIT(_NEW_PROGRAM_CONSTANTS),
   {0, 0, 0}
};

static struct dirty_bit_map brw_bits[] = {
   DEFINE_BIT(BRW_NEW_URB_FENCE),
   DEFINE_BIT(BRW_NEW_FRAGMENT_PROGRAM),
   DEFINE_BIT(BRW_NEW_VERTEX_PROGRAM),
   DEFINE_BIT(BRW_NEW_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_CURBE_OFFSETS),
   DEFINE_BIT(BRW_NEW_REDUCED_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_PRIMITIVE),
   DEFINE_BIT(BRW_NEW_CONTEXT),
   DEFINE_BIT(BRW_NEW_WM_INPUT_DIMENSIONS),
   DEFINE_BIT(BRW_NEW_PSP),
   DEFINE_BIT(BRW_NEW_FENCE),
   DEFINE_BIT(BRW_NEW_INDICES),
   DEFINE_BIT(BRW_NEW_INDEX_BUFFER),
   DEFINE_BIT(BRW_NEW_VERTICES),
   DEFINE_BIT(BRW_NEW_BATCH),
   DEFINE_BIT(BRW_NEW_DEPTH_BUFFER),
   {0, 0, 0}
};

static struct dirty_bit_map cache_bits[] = {
   DEFINE_BIT(CACHE_NEW_CC_VP),
   DEFINE_BIT(CACHE_NEW_CC_UNIT),
   DEFINE_BIT(CACHE_NEW_WM_PROG),
   DEFINE_BIT(CACHE_NEW_SAMPLER_DEFAULT_COLOR),
   DEFINE_BIT(CACHE_NEW_SAMPLER),
   DEFINE_BIT(CACHE_NEW_WM_UNIT),
   DEFINE_BIT(CACHE_NEW_SF_PROG),
   DEFINE_BIT(CACHE_NEW_SF_VP),
   DEFINE_BIT(CACHE_NEW_SF_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_UNIT),
   DEFINE_BIT(CACHE_NEW_VS_PROG),
   DEFINE_BIT(CACHE_NEW_GS_UNIT),
   DEFINE_BIT(CACHE_NEW_GS_PROG),
   DEFINE_BIT(CACHE_NEW_CLIP_VP),
   DEFINE_BIT(CACHE_NEW_CLIP_UNIT),
   DEFINE_BIT(CACHE_NEW_CLIP_PROG),
   DEFINE_BIT(CACHE_NEW_SURFACE),
   DEFINE_BIT(CACHE_NEW_SURF_BIND),
   {0, 0, 0}
};


static void
brw_update_dirty_count(struct dirty_bit_map *bit_map, uint32_t bits)
{
   int i;

   for (i = 0; i < 32; i++) {
      if (bit_map[i].bit == 0)
	 return;

      if (bit_map[i].bit & bits)
	 bit_map[i].count++;
   }
}

static void
brw_print_dirty_count(struct dirty_bit_map *bit_map, uint32_t bits)
{
   int i;

   (void) bits;	/* unused: the accumulated counts are printed instead */

   for (i = 0; i < 32; i++) {
      if (bit_map[i].bit == 0)
	 return;

      fprintf(stderr, "0x%08x: %12d (%s)\n",
	      bit_map[i].bit, bit_map[i].count, bit_map[i].name);
   }
}
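
/* Each entry prints as one "0x%08x: %12d (%s)" line: the raw flag
 * value, the number of uploads in which it was set, and its name.
 * An illustrative line (values invented):
 *
 *    0x00000020:           42 (_NEW_COLOR)
 */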

/***********************************************************************
 * Validate all state: determine the combined dirty flags and run the
 * prepare hook of each dirty atom.  The actual hardware commands are
 * emitted later, in brw_upload_state().
 */
void brw_validate_state( struct brw_context *brw )
{
   GLcontext *ctx = &brw->intel.ctx;
   struct intel_context *intel = &brw->intel;
   struct brw_state_flags *state = &brw->state.dirty;
   GLuint i;

   brw_clear_validated_bos(brw);

   state->mesa |= brw->intel.NewGLState;
   brw->intel.NewGLState = 0;

   brw_add_validated_bo(brw, intel->batch->buf);

   if (brw->emit_state_always) {
      state->mesa |= ~0;
      state->brw |= ~0;
      state->cache |= ~0;
   }

   if (brw->fragment_program != ctx->FragmentProgram._Current) {
      brw->fragment_program = ctx->FragmentProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_FRAGMENT_PROGRAM;
   }

   if (brw->vertex_program != ctx->VertexProgram._Current) {
      brw->vertex_program = ctx->VertexProgram._Current;
      brw->state.dirty.brw |= BRW_NEW_VERTEX_PROGRAM;
   }

   if (state->mesa == 0 &&
       state->cache == 0 &&
       state->brw == 0)
      return;

   if (brw->state.dirty.brw & BRW_NEW_CONTEXT)
      brw_clear_batch_cache(brw);

   brw->intel.Fallback = 0;

   /* do prepare stage for all atoms */
   for (i = 0; i < Elements(atoms); i++) {
      const struct brw_tracked_state *atom = atoms[i];

      if (brw->intel.Fallback)
	 break;

      if (check_state(state, &atom->dirty)) {
	 if (atom->prepare) {
	    atom->prepare(brw);
	 }
      }
   }

   /* Make sure that the textures which are referenced by the current
    * brw fragment program are actually present/valid.
    * If this fails, we can experience GPU lock-ups.
    */
   {
      const struct brw_fragment_program *fp;
      fp = brw_fragment_program_const(brw->fragment_program);
      if (fp) {
	 assert((fp->tex_units_used & ctx->Texture._EnabledUnits)
		== fp->tex_units_used);
      }
   }
}

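/* A minimal sketch of how the draw path is assumed to call this pair
 * of entry points (the exact caller and its surrounding checks live
 * elsewhere, likely brw_draw.c):
 *
 *    brw_validate_state(brw);        // prepare stage, may set Fallback
 *    if (!brw->intel.Fallback)
 *       brw_upload_state(brw);       // emit stage, clears dirty flags
 */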
void brw_upload_state(struct brw_context *brw)
{
   struct brw_state_flags *state = &brw->state.dirty;
   int i;
   static int dirty_count = 0;

   brw_clear_validated_bos(brw);

   if (INTEL_DEBUG) {
      /* Debug version which enforces various sanity checks on the
       * state flags which are generated and checked to help ensure
       * state atoms are ordered correctly in the list.
       */
      struct brw_state_flags examined, prev;
      _mesa_memset(&examined, 0, sizeof(examined));
      prev = *state;

      for (i = 0; i < Elements(atoms); i++) {
	 const struct brw_tracked_state *atom = atoms[i];
	 struct brw_state_flags generated;

	 assert(atom->dirty.mesa ||
		atom->dirty.brw ||
		atom->dirty.cache);

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }

	 accumulate_state(&examined, &atom->dirty);

	 /* generated = (prev ^ state)
	  * if (examined & generated)
	  *     fail;
	  */
	 xor_states(&generated, &prev, state);
	 assert(!check_state(&examined, &generated));
	 prev = *state;
      }
   }
   else {
      for (i = 0; i < Elements(atoms); i++) {
	 const struct brw_tracked_state *atom = atoms[i];

	 if (brw->intel.Fallback)
	    break;

	 if (check_state(state, &atom->dirty)) {
	    if (atom->emit) {
	       atom->emit( brw );
	    }
	 }
      }
   }

   if (INTEL_DEBUG & DEBUG_STATE) {
      brw_update_dirty_count(mesa_bits, state->mesa);
      brw_update_dirty_count(brw_bits, state->brw);
      brw_update_dirty_count(cache_bits, state->cache);
      if (dirty_count++ % 1000 == 0) {
	 brw_print_dirty_count(mesa_bits, state->mesa);
	 brw_print_dirty_count(brw_bits, state->brw);
	 brw_print_dirty_count(cache_bits, state->cache);
	 fprintf(stderr, "\n");
      }
   }

   if (!brw->intel.Fallback)
      memset(state, 0, sizeof(*state));
}