/*
 * Copyright © 2007 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
#include <assert.h>
#include <stdarg.h>
#include <stdio.h>

#include "main/mtypes.h"
#include "intel_batchbuffer.h"

#include "brw_context.h"
#include "brw_defines.h"
35 batch_out(struct brw_context
*brw
, const char *name
, uint32_t offset
,
36 int index
, char *fmt
, ...) PRINTFLIKE(5, 6);
39 batch_out(struct brw_context
*brw
, const char *name
, uint32_t offset
,
40 int index
, char *fmt
, ...)
42 struct intel_context
*intel
= &brw
->intel
;
43 uint32_t *data
= intel
->batch
.bo
->virtual + offset
;
46 fprintf(stderr
, "0x%08x: 0x%08x: %8s: ",
47 offset
+ index
* 4, data
[index
], name
);
49 vfprintf(stderr
, fmt
, va
);
/* Decode the SURFACE_STATE surface-type field into a printable name.
 * NOTE(review): cases 0-2 (1D/2D/3D) were dropped in the corrupted source;
 * restored here from the hardware enum ordering used by the format helper.
 */
static const char *
get_965_surfacetype(unsigned int surfacetype)
{
    switch (surfacetype) {
    case 0: return "1D";
    case 1: return "2D";
    case 2: return "3D";
    case 3: return "CUBE";
    case 4: return "BUFFER";
    case 7: return "NULL";
    default: return "unknown";
    }
}
/* Decode a SURFACE_STATE surface-format value into a printable name.
 * Only the formats the driver commonly emits are decoded; anything else
 * is reported as "unknown".
 */
static const char *
get_965_surface_format(unsigned int surface_format)
{
    switch (surface_format) {
    case 0x000: return "r32g32b32a32_float";
    case 0x0c1: return "b8g8r8a8_unorm";
    case 0x100: return "b5g6r5_unorm";
    case 0x102: return "b5g5r5a1_unorm";
    case 0x104: return "b4g4r4a4_unorm";
    default: return "unknown";
    }
}
80 static void dump_vs_state(struct brw_context
*brw
, uint32_t offset
)
82 struct intel_context
*intel
= &brw
->intel
;
83 const char *name
= "VS_STATE";
84 struct brw_vs_unit_state
*vs
= intel
->batch
.bo
->virtual + offset
;
86 batch_out(brw
, name
, offset
, 0, "thread0\n");
87 batch_out(brw
, name
, offset
, 1, "thread1\n");
88 batch_out(brw
, name
, offset
, 2, "thread2\n");
89 batch_out(brw
, name
, offset
, 3, "thread3\n");
90 batch_out(brw
, name
, offset
, 4, "thread4: %d threads\n",
91 vs
->thread4
.max_threads
+ 1);
92 batch_out(brw
, name
, offset
, 5, "vs5\n");
93 batch_out(brw
, name
, offset
, 6, "vs6\n");
96 static void dump_gs_state(struct brw_context
*brw
, uint32_t offset
)
98 struct intel_context
*intel
= &brw
->intel
;
99 const char *name
= "GS_STATE";
100 struct brw_gs_unit_state
*gs
= intel
->batch
.bo
->virtual + offset
;
102 batch_out(brw
, name
, offset
, 0, "thread0\n");
103 batch_out(brw
, name
, offset
, 1, "thread1\n");
104 batch_out(brw
, name
, offset
, 2, "thread2\n");
105 batch_out(brw
, name
, offset
, 3, "thread3\n");
106 batch_out(brw
, name
, offset
, 4, "thread4: %d threads\n",
107 gs
->thread4
.max_threads
+ 1);
108 batch_out(brw
, name
, offset
, 5, "vs5\n");
109 batch_out(brw
, name
, offset
, 6, "vs6\n");
112 static void dump_clip_state(struct brw_context
*brw
, uint32_t offset
)
114 struct intel_context
*intel
= &brw
->intel
;
115 const char *name
= "CLIP_STATE";
116 struct brw_clip_unit_state
*clip
= intel
->batch
.bo
->virtual + offset
;
118 batch_out(brw
, name
, offset
, 0, "thread0\n");
119 batch_out(brw
, name
, offset
, 1, "thread1\n");
120 batch_out(brw
, name
, offset
, 2, "thread2\n");
121 batch_out(brw
, name
, offset
, 3, "thread3\n");
122 batch_out(brw
, name
, offset
, 4, "thread4: %d threads\n",
123 clip
->thread4
.max_threads
+ 1);
124 batch_out(brw
, name
, offset
, 5, "clip5\n");
125 batch_out(brw
, name
, offset
, 6, "clip6\n");
126 batch_out(brw
, name
, offset
, 7, "vp xmin %f\n", clip
->viewport_xmin
);
127 batch_out(brw
, name
, offset
, 8, "vp xmax %f\n", clip
->viewport_xmax
);
128 batch_out(brw
, name
, offset
, 9, "vp ymin %f\n", clip
->viewport_ymin
);
129 batch_out(brw
, name
, offset
, 10, "vp ymax %f\n", clip
->viewport_ymax
);
132 static void dump_sf_state(struct brw_context
*brw
, uint32_t offset
)
134 struct intel_context
*intel
= &brw
->intel
;
135 const char *name
= "SF_STATE";
136 struct brw_sf_unit_state
*sf
= intel
->batch
.bo
->virtual + offset
;
138 batch_out(brw
, name
, offset
, 0, "thread0\n");
139 batch_out(brw
, name
, offset
, 1, "thread1\n");
140 batch_out(brw
, name
, offset
, 2, "thread2\n");
141 batch_out(brw
, name
, offset
, 3, "thread3\n");
142 batch_out(brw
, name
, offset
, 4, "thread4: %d threads\n",
143 sf
->thread4
.max_threads
+ 1);
144 batch_out(brw
, name
, offset
, 5, "sf5: viewport offset\n");
145 batch_out(brw
, name
, offset
, 6, "sf6\n");
146 batch_out(brw
, name
, offset
, 7, "sf7\n");
149 static void dump_wm_state(struct brw_context
*brw
, uint32_t offset
)
151 struct intel_context
*intel
= &brw
->intel
;
152 const char *name
= "WM_STATE";
153 struct brw_wm_unit_state
*wm
= intel
->batch
.bo
->virtual + offset
;
155 batch_out(brw
, name
, offset
, 0, "thread0\n");
156 batch_out(brw
, name
, offset
, 1, "thread1\n");
157 batch_out(brw
, name
, offset
, 2, "thread2\n");
158 batch_out(brw
, name
, offset
, 3, "thread3\n");
159 batch_out(brw
, name
, offset
, 4, "wm4\n");
160 batch_out(brw
, name
, offset
, 5, "wm5: %s%s%s%s%s%s, %d threads\n",
161 wm
->wm5
.enable_8_pix
? "8pix" : "",
162 wm
->wm5
.enable_16_pix
? "16pix" : "",
163 wm
->wm5
.program_uses_depth
? ", uses depth" : "",
164 wm
->wm5
.program_computes_depth
? ", computes depth" : "",
165 wm
->wm5
.program_uses_killpixel
? ", kills" : "",
166 wm
->wm5
.thread_dispatch_enable
? "" : ", no dispatch",
167 wm
->wm5
.max_threads
+ 1);
168 batch_out(brw
, name
, offset
, 6, "depth offset constant %f\n",
169 wm
->global_depth_offset_constant
);
170 batch_out(brw
, name
, offset
, 7, "depth offset scale %f\n",
171 wm
->global_depth_offset_scale
);
172 batch_out(brw
, name
, offset
, 8, "wm8: kernel 1 (gen5+)\n");
173 batch_out(brw
, name
, offset
, 9, "wm9: kernel 2 (gen5+)\n");
174 batch_out(brw
, name
, offset
, 10, "wm10: kernel 3 (gen5+)\n");
177 static void dump_surface_state(struct brw_context
*brw
, uint32_t offset
)
179 const char *name
= "SURF";
180 uint32_t *surf
= brw
->intel
.batch
.bo
->virtual + offset
;
182 batch_out(brw
, name
, offset
, 0, "%s %s\n",
183 get_965_surfacetype(GET_FIELD(surf
[0], BRW_SURFACE_TYPE
)),
184 get_965_surface_format(GET_FIELD(surf
[0], BRW_SURFACE_FORMAT
)));
185 batch_out(brw
, name
, offset
, 1, "offset\n");
186 batch_out(brw
, name
, offset
, 2, "%dx%d size, %d mips\n",
187 GET_FIELD(surf
[2], BRW_SURFACE_WIDTH
) + 1,
188 GET_FIELD(surf
[2], BRW_SURFACE_HEIGHT
) + 1,
189 GET_FIELD(surf
[2], BRW_SURFACE_LOD
));
190 batch_out(brw
, name
, offset
, 3, "pitch %d, %s tiled\n",
191 GET_FIELD(surf
[3], BRW_SURFACE_PITCH
) + 1,
192 (surf
[3] & BRW_SURFACE_TILED
) ?
193 ((surf
[3] & BRW_SURFACE_TILED_Y
) ? "Y" : "X") : "not");
194 batch_out(brw
, name
, offset
, 4, "mip base %d\n",
195 GET_FIELD(surf
[4], BRW_SURFACE_MIN_LOD
));
196 batch_out(brw
, name
, offset
, 5, "x,y offset: %d,%d\n",
197 GET_FIELD(surf
[5], BRW_SURFACE_X_OFFSET
),
198 GET_FIELD(surf
[5], BRW_SURFACE_Y_OFFSET
));
201 static void dump_gen7_surface_state(struct brw_context
*brw
, uint32_t offset
)
203 const char *name
= "SURF";
204 struct gen7_surface_state
*surf
= brw
->intel
.batch
.bo
->virtual + offset
;
206 batch_out(brw
, name
, offset
, 0, "%s %s\n",
207 get_965_surfacetype(surf
->ss0
.surface_type
),
208 get_965_surface_format(surf
->ss0
.surface_format
));
209 batch_out(brw
, name
, offset
, 1, "offset\n");
210 batch_out(brw
, name
, offset
, 2, "%dx%d size, %d mips\n",
211 surf
->ss2
.width
+ 1, surf
->ss2
.height
+ 1, surf
->ss5
.mip_count
);
212 batch_out(brw
, name
, offset
, 3, "pitch %d, %stiled\n",
213 surf
->ss3
.pitch
+ 1, surf
->ss0
.tiled_surface
? "" : "not ");
214 batch_out(brw
, name
, offset
, 4, "mip base %d\n",
216 batch_out(brw
, name
, offset
, 5, "x,y offset: %d,%d\n",
217 surf
->ss5
.x_offset
, surf
->ss5
.y_offset
);
221 dump_sdc(struct brw_context
*brw
, uint32_t offset
)
223 const char *name
= "SDC";
224 struct intel_context
*intel
= &brw
->intel
;
226 if (intel
->gen
>= 5 && intel
->gen
<= 6) {
227 struct gen5_sampler_default_color
*sdc
= (intel
->batch
.bo
->virtual +
229 batch_out(brw
, name
, offset
, 0, "unorm rgba\n");
230 batch_out(brw
, name
, offset
, 1, "r %f\n", sdc
->f
[0]);
231 batch_out(brw
, name
, offset
, 2, "b %f\n", sdc
->f
[1]);
232 batch_out(brw
, name
, offset
, 3, "g %f\n", sdc
->f
[2]);
233 batch_out(brw
, name
, offset
, 4, "a %f\n", sdc
->f
[3]);
234 batch_out(brw
, name
, offset
, 5, "half float rg\n");
235 batch_out(brw
, name
, offset
, 6, "half float ba\n");
236 batch_out(brw
, name
, offset
, 7, "u16 rg\n");
237 batch_out(brw
, name
, offset
, 8, "u16 ba\n");
238 batch_out(brw
, name
, offset
, 9, "s16 rg\n");
239 batch_out(brw
, name
, offset
, 10, "s16 ba\n");
240 batch_out(brw
, name
, offset
, 11, "s8 rgba\n");
242 struct brw_sampler_default_color
*sdc
= (intel
->batch
.bo
->virtual +
244 batch_out(brw
, name
, offset
, 0, "r %f\n", sdc
->color
[0]);
245 batch_out(brw
, name
, offset
, 1, "g %f\n", sdc
->color
[1]);
246 batch_out(brw
, name
, offset
, 2, "b %f\n", sdc
->color
[2]);
247 batch_out(brw
, name
, offset
, 3, "a %f\n", sdc
->color
[3]);
251 static void dump_sampler_state(struct brw_context
*brw
,
252 uint32_t offset
, uint32_t size
)
254 struct intel_context
*intel
= &brw
->intel
;
256 struct brw_sampler_state
*samp
= intel
->batch
.bo
->virtual + offset
;
258 assert(intel
->gen
< 7);
260 for (i
= 0; i
< size
/ sizeof(*samp
); i
++) {
263 sprintf(name
, "WM SAMP%d", i
);
264 batch_out(brw
, name
, offset
, 0, "filtering\n");
265 batch_out(brw
, name
, offset
, 1, "wrapping, lod\n");
266 batch_out(brw
, name
, offset
, 2, "default color pointer\n");
267 batch_out(brw
, name
, offset
, 3, "chroma key, aniso\n");
270 offset
+= sizeof(*samp
);
274 static void dump_gen7_sampler_state(struct brw_context
*brw
,
275 uint32_t offset
, uint32_t size
)
277 struct intel_context
*intel
= &brw
->intel
;
278 struct gen7_sampler_state
*samp
= intel
->batch
.bo
->virtual + offset
;
281 assert(intel
->gen
>= 7);
283 for (i
= 0; i
< size
/ sizeof(*samp
); i
++) {
286 sprintf(name
, "WM SAMP%d", i
);
287 batch_out(brw
, name
, offset
, 0, "filtering\n");
288 batch_out(brw
, name
, offset
, 1, "wrapping, lod\n");
289 batch_out(brw
, name
, offset
, 2, "default color pointer\n");
290 batch_out(brw
, name
, offset
, 3, "chroma key, aniso\n");
293 offset
+= sizeof(*samp
);
295 drm_intel_bo_unmap(intel
->batch
.bo
);
299 static void dump_sf_viewport_state(struct brw_context
*brw
,
302 struct intel_context
*intel
= &brw
->intel
;
303 const char *name
= "SF VP";
304 struct brw_sf_viewport
*vp
= intel
->batch
.bo
->virtual + offset
;
306 assert(intel
->gen
< 7);
308 batch_out(brw
, name
, offset
, 0, "m00 = %f\n", vp
->viewport
.m00
);
309 batch_out(brw
, name
, offset
, 1, "m11 = %f\n", vp
->viewport
.m11
);
310 batch_out(brw
, name
, offset
, 2, "m22 = %f\n", vp
->viewport
.m22
);
311 batch_out(brw
, name
, offset
, 3, "m30 = %f\n", vp
->viewport
.m30
);
312 batch_out(brw
, name
, offset
, 4, "m31 = %f\n", vp
->viewport
.m31
);
313 batch_out(brw
, name
, offset
, 5, "m32 = %f\n", vp
->viewport
.m32
);
315 batch_out(brw
, name
, offset
, 6, "top left = %d,%d\n",
316 vp
->scissor
.xmin
, vp
->scissor
.ymin
);
317 batch_out(brw
, name
, offset
, 7, "bottom right = %d,%d\n",
318 vp
->scissor
.xmax
, vp
->scissor
.ymax
);
321 static void dump_clip_viewport_state(struct brw_context
*brw
,
324 struct intel_context
*intel
= &brw
->intel
;
325 const char *name
= "CLIP VP";
326 struct brw_clipper_viewport
*vp
= intel
->batch
.bo
->virtual + offset
;
328 assert(intel
->gen
< 7);
330 batch_out(brw
, name
, offset
, 0, "xmin = %f\n", vp
->xmin
);
331 batch_out(brw
, name
, offset
, 1, "xmax = %f\n", vp
->xmax
);
332 batch_out(brw
, name
, offset
, 2, "ymin = %f\n", vp
->ymin
);
333 batch_out(brw
, name
, offset
, 3, "ymax = %f\n", vp
->ymax
);
336 static void dump_sf_clip_viewport_state(struct brw_context
*brw
,
339 struct intel_context
*intel
= &brw
->intel
;
340 const char *name
= "SF_CLIP VP";
341 struct gen7_sf_clip_viewport
*vp
= intel
->batch
.bo
->virtual + offset
;
343 assert(intel
->gen
>= 7);
345 batch_out(brw
, name
, offset
, 0, "m00 = %f\n", vp
->viewport
.m00
);
346 batch_out(brw
, name
, offset
, 1, "m11 = %f\n", vp
->viewport
.m11
);
347 batch_out(brw
, name
, offset
, 2, "m22 = %f\n", vp
->viewport
.m22
);
348 batch_out(brw
, name
, offset
, 3, "m30 = %f\n", vp
->viewport
.m30
);
349 batch_out(brw
, name
, offset
, 4, "m31 = %f\n", vp
->viewport
.m31
);
350 batch_out(brw
, name
, offset
, 5, "m32 = %f\n", vp
->viewport
.m32
);
351 batch_out(brw
, name
, offset
, 6, "guardband xmin = %f\n", vp
->guardband
.xmin
);
352 batch_out(brw
, name
, offset
, 7, "guardband xmax = %f\n", vp
->guardband
.xmax
);
353 batch_out(brw
, name
, offset
, 8, "guardband ymin = %f\n", vp
->guardband
.ymin
);
354 batch_out(brw
, name
, offset
, 9, "guardband ymax = %f\n", vp
->guardband
.ymax
);
358 static void dump_cc_viewport_state(struct brw_context
*brw
, uint32_t offset
)
360 const char *name
= "CC VP";
361 struct brw_cc_viewport
*vp
= brw
->intel
.batch
.bo
->virtual + offset
;
363 batch_out(brw
, name
, offset
, 0, "min_depth = %f\n", vp
->min_depth
);
364 batch_out(brw
, name
, offset
, 1, "max_depth = %f\n", vp
->max_depth
);
367 static void dump_depth_stencil_state(struct brw_context
*brw
, uint32_t offset
)
369 const char *name
= "D_S";
370 struct gen6_depth_stencil_state
*ds
= brw
->intel
.batch
.bo
->virtual + offset
;
372 batch_out(brw
, name
, offset
, 0,
373 "stencil %sable, func %d, write %sable\n",
374 ds
->ds0
.stencil_enable
? "en" : "dis",
375 ds
->ds0
.stencil_func
,
376 ds
->ds0
.stencil_write_enable
? "en" : "dis");
377 batch_out(brw
, name
, offset
, 1,
378 "stencil test mask 0x%x, write mask 0x%x\n",
379 ds
->ds1
.stencil_test_mask
, ds
->ds1
.stencil_write_mask
);
380 batch_out(brw
, name
, offset
, 2,
381 "depth test %sable, func %d, write %sable\n",
382 ds
->ds2
.depth_test_enable
? "en" : "dis",
383 ds
->ds2
.depth_test_func
,
384 ds
->ds2
.depth_write_enable
? "en" : "dis");
/* Dump the gen4-5 COLOR_CALC_STATE unit descriptor (8 undecoded dwords). */
static void dump_cc_state_gen4(struct brw_context *brw, uint32_t offset)
{
   const char *name = "CC";

   batch_out(brw, name, offset, 0, "cc0\n");
   batch_out(brw, name, offset, 1, "cc1\n");
   batch_out(brw, name, offset, 2, "cc2\n");
   batch_out(brw, name, offset, 3, "cc3\n");
   batch_out(brw, name, offset, 4, "cc4: viewport offset\n");
   batch_out(brw, name, offset, 5, "cc5\n");
   batch_out(brw, name, offset, 6, "cc6\n");
   batch_out(brw, name, offset, 7, "cc7\n");
}
401 static void dump_cc_state_gen6(struct brw_context
*brw
, uint32_t offset
)
403 const char *name
= "CC";
404 struct gen6_color_calc_state
*cc
= brw
->intel
.batch
.bo
->virtual + offset
;
406 batch_out(brw
, name
, offset
, 0,
407 "alpha test format %s, round disable %d, stencil ref %d, "
408 "bf stencil ref %d\n",
409 cc
->cc0
.alpha_test_format
? "FLOAT32" : "UNORM8",
410 cc
->cc0
.round_disable
,
412 cc
->cc0
.bf_stencil_ref
);
413 batch_out(brw
, name
, offset
, 1, "\n");
414 batch_out(brw
, name
, offset
, 2, "constant red %f\n", cc
->constant_r
);
415 batch_out(brw
, name
, offset
, 3, "constant green %f\n", cc
->constant_g
);
416 batch_out(brw
, name
, offset
, 4, "constant blue %f\n", cc
->constant_b
);
417 batch_out(brw
, name
, offset
, 5, "constant alpha %f\n", cc
->constant_a
);
/* Dump a gen6+ BLEND_STATE (2 undecoded dwords). */
static void dump_blend_state(struct brw_context *brw, uint32_t offset)
{
   const char *name = "BLEND";

   batch_out(brw, name, offset, 0, "\n");
   batch_out(brw, name, offset, 1, "\n");
}
429 dump_scissor(struct brw_context
*brw
, uint32_t offset
)
431 const char *name
= "SCISSOR";
432 struct intel_context
*intel
= &brw
->intel
;
433 struct gen6_scissor_rect
*scissor
= intel
->batch
.bo
->virtual + offset
;
435 batch_out(brw
, name
, offset
, 0, "xmin %d, ymin %d\n",
436 scissor
->xmin
, scissor
->ymin
);
437 batch_out(brw
, name
, offset
, 1, "xmax %d, ymax %d\n",
438 scissor
->xmax
, scissor
->ymax
);
442 dump_vs_constants(struct brw_context
*brw
, uint32_t offset
, uint32_t size
)
444 const char *name
= "VS_CONST";
445 struct intel_context
*intel
= &brw
->intel
;
446 uint32_t *as_uint
= intel
->batch
.bo
->virtual + offset
;
447 float *as_float
= intel
->batch
.bo
->virtual + offset
;
450 for (i
= 0; i
< size
/ 4; i
+= 4) {
451 batch_out(brw
, name
, offset
, i
, "%3d: (% f % f % f % f) (0x%08x 0x%08x 0x%08x 0x%08x)\n",
453 as_float
[i
], as_float
[i
+ 1], as_float
[i
+ 2], as_float
[i
+ 3],
454 as_uint
[i
], as_uint
[i
+ 1], as_uint
[i
+ 2], as_uint
[i
+ 3]);
459 dump_wm_constants(struct brw_context
*brw
, uint32_t offset
, uint32_t size
)
461 const char *name
= "WM_CONST";
462 struct intel_context
*intel
= &brw
->intel
;
463 uint32_t *as_uint
= intel
->batch
.bo
->virtual + offset
;
464 float *as_float
= intel
->batch
.bo
->virtual + offset
;
467 for (i
= 0; i
< size
/ 4; i
+= 4) {
468 batch_out(brw
, name
, offset
, i
, "%3d: (% f % f % f % f) (0x%08x 0x%08x 0x%08x 0x%08x)\n",
470 as_float
[i
], as_float
[i
+ 1], as_float
[i
+ 2], as_float
[i
+ 3],
471 as_uint
[i
], as_uint
[i
+ 1], as_uint
[i
+ 2], as_uint
[i
+ 3]);
475 static void dump_binding_table(struct brw_context
*brw
, uint32_t offset
,
480 uint32_t *data
= brw
->intel
.batch
.bo
->virtual + offset
;
482 for (i
= 0; i
< size
/ 4; i
++) {
486 sprintf(name
, "BIND%d", i
);
487 batch_out(brw
, name
, offset
, i
, "surface state address\n");
492 dump_prog_cache(struct brw_context
*brw
)
494 struct intel_context
*intel
= &brw
->intel
;
495 struct brw_cache
*cache
= &brw
->cache
;
499 drm_intel_bo_map(brw
->cache
.bo
, false);
501 for (b
= 0; b
< cache
->size
; b
++) {
502 struct brw_cache_item
*item
;
504 for (item
= cache
->items
[b
]; item
; item
= item
->next
) {
506 uint32_t offset
= item
->offset
;
508 data
= brw
->cache
.bo
->virtual + item
->offset
;
510 switch (item
->cache_id
) {
518 name
= "CLIP kernel";
531 for (i
= 0; i
< item
->size
/ 4 / 4; i
++) {
532 fprintf(stderr
, "0x%08x: %8s: 0x%08x 0x%08x 0x%08x 0x%08x ",
535 data
[i
* 4], data
[i
* 4 + 1], data
[i
* 4 + 2], data
[i
* 4 + 3]);
537 brw_disasm(stderr
, (void *)(data
+ i
* 4), intel
->gen
);
542 drm_intel_bo_unmap(brw
->cache
.bo
);
546 dump_state_batch(struct brw_context
*brw
)
548 struct intel_context
*intel
= &brw
->intel
;
551 for (i
= 0; i
< brw
->state_batch_count
; i
++) {
552 uint32_t offset
= brw
->state_batch_list
[i
].offset
;
553 uint32_t size
= brw
->state_batch_list
[i
].size
;
555 switch (brw
->state_batch_list
[i
].type
) {
556 case AUB_TRACE_VS_STATE
:
557 dump_vs_state(brw
, offset
);
559 case AUB_TRACE_GS_STATE
:
560 dump_gs_state(brw
, offset
);
562 case AUB_TRACE_CLIP_STATE
:
563 dump_clip_state(brw
, offset
);
565 case AUB_TRACE_SF_STATE
:
566 dump_sf_state(brw
, offset
);
568 case AUB_TRACE_WM_STATE
:
569 dump_wm_state(brw
, offset
);
571 case AUB_TRACE_CLIP_VP_STATE
:
572 dump_clip_viewport_state(brw
, offset
);
574 case AUB_TRACE_SF_VP_STATE
:
575 if (intel
->gen
>= 7) {
576 dump_sf_clip_viewport_state(brw
, offset
);
578 dump_sf_viewport_state(brw
, offset
);
581 case AUB_TRACE_CC_VP_STATE
:
582 dump_cc_viewport_state(brw
, offset
);
584 case AUB_TRACE_DEPTH_STENCIL_STATE
:
585 dump_depth_stencil_state(brw
, offset
);
587 case AUB_TRACE_CC_STATE
:
589 dump_cc_state_gen6(brw
, offset
);
591 dump_cc_state_gen4(brw
, offset
);
593 case AUB_TRACE_BLEND_STATE
:
594 dump_blend_state(brw
, offset
);
596 case AUB_TRACE_BINDING_TABLE
:
597 dump_binding_table(brw
, offset
, size
);
599 case AUB_TRACE_SURFACE_STATE
:
600 if (intel
->gen
< 7) {
601 dump_surface_state(brw
, offset
);
603 dump_gen7_surface_state(brw
, offset
);
606 case AUB_TRACE_SAMPLER_STATE
:
607 if (intel
->gen
< 7) {
608 dump_sampler_state(brw
, offset
, size
);
610 dump_gen7_sampler_state(brw
, offset
, size
);
613 case AUB_TRACE_SAMPLER_DEFAULT_COLOR
:
614 dump_sdc(brw
, offset
);
616 case AUB_TRACE_SCISSOR_STATE
:
617 dump_scissor(brw
, offset
);
619 case AUB_TRACE_VS_CONSTANTS
:
620 dump_vs_constants(brw
, offset
, size
);
622 case AUB_TRACE_WM_CONSTANTS
:
623 dump_wm_constants(brw
, offset
, size
);
632 * Print additional debug information associated with the batchbuffer
633 * when DEBUG_BATCH is set.
635 * For 965, this means mapping the state buffers that would have been referenced
636 * by the batchbuffer and dumping them.
638 * The buffer offsets printed rely on the buffer containing the last offset
639 * it was validated at.
641 void brw_debug_batch(struct intel_context
*intel
)
643 struct brw_context
*brw
= brw_context(&intel
->ctx
);
645 drm_intel_bo_map(intel
->batch
.bo
, false);
646 dump_state_batch(brw
);
647 drm_intel_bo_unmap(intel
->batch
.bo
);
650 dump_prog_cache(brw
);