 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
24 #include "common/gen_decoder.h"
25 #include "gen_disasm.h"
26 #include "util/macros.h"
27 #include "main/macros.h" /* Needed for ROUND_DOWN_TO */
32 gen_batch_decode_ctx_init(struct gen_batch_decode_ctx
*ctx
,
33 const struct gen_device_info
*devinfo
,
34 FILE *fp
, enum gen_batch_decode_flags flags
,
36 struct gen_batch_decode_bo (*get_bo
)(void *,
39 unsigned (*get_state_size
)(void *, uint32_t),
42 memset(ctx
, 0, sizeof(*ctx
));
45 ctx
->get_state_size
= get_state_size
;
46 ctx
->user_data
= user_data
;
49 ctx
->max_vbo_decoded_lines
= -1; /* No limit! */
50 ctx
->engine
= I915_ENGINE_CLASS_RENDER
;
53 ctx
->spec
= gen_spec_load(devinfo
);
55 ctx
->spec
= gen_spec_load_from_path(devinfo
, xml_path
);
56 ctx
->disasm
= gen_disasm_create(devinfo
);
60 gen_batch_decode_ctx_finish(struct gen_batch_decode_ctx
*ctx
)
62 gen_spec_destroy(ctx
->spec
);
63 gen_disasm_destroy(ctx
->disasm
);
/* ANSI SGR escape sequences used to colorize decoder output when
 * GEN_BATCH_DECODE_IN_COLOR is set. CSI is the control-sequence introducer
 * defined earlier in this file.
 */
#define RED_COLOR    CSI "31m"
#define BLUE_HEADER  CSI "0;44m"
#define GREEN_HEADER CSI "1;42m"
#define NORMAL       CSI "0m"
73 ctx_print_group(struct gen_batch_decode_ctx
*ctx
,
74 struct gen_group
*group
,
75 uint64_t address
, const void *map
)
77 gen_print_group(ctx
->fp
, group
, address
, map
, 0,
78 (ctx
->flags
& GEN_BATCH_DECODE_IN_COLOR
) != 0);
81 static struct gen_batch_decode_bo
82 ctx_get_bo(struct gen_batch_decode_ctx
*ctx
, bool ppgtt
, uint64_t addr
)
84 if (gen_spec_get_gen(ctx
->spec
) >= gen_make_gen(8,0)) {
85 /* On Broadwell and above, we have 48-bit addresses which consume two
86 * dwords. Some packets require that these get stored in a "canonical
87 * form" which means that bit 47 is sign-extended through the upper
88 * bits. In order to correctly handle those aub dumps, we need to mask
89 * off the top 16 bits.
91 addr
&= (~0ull >> 16);
94 struct gen_batch_decode_bo bo
= ctx
->get_bo(ctx
->user_data
, ppgtt
, addr
);
96 if (gen_spec_get_gen(ctx
->spec
) >= gen_make_gen(8,0))
97 bo
.addr
&= (~0ull >> 16);
99 /* We may actually have an offset into the bo */
100 if (bo
.map
!= NULL
) {
101 assert(bo
.addr
<= addr
);
102 uint64_t offset
= addr
- bo
.addr
;
112 update_count(struct gen_batch_decode_ctx
*ctx
,
113 uint32_t offset_from_dsba
,
114 unsigned element_dwords
,
119 if (ctx
->get_state_size
)
120 size
= ctx
->get_state_size(ctx
->user_data
, offset_from_dsba
);
123 return size
/ (sizeof(uint32_t) * element_dwords
);
125 /* In the absence of any information, just guess arbitrarily. */
130 ctx_disassemble_program(struct gen_batch_decode_ctx
*ctx
,
131 uint32_t ksp
, const char *type
)
133 uint64_t addr
= ctx
->instruction_base
+ ksp
;
134 struct gen_batch_decode_bo bo
= ctx_get_bo(ctx
, true, addr
);
138 fprintf(ctx
->fp
, "\nReferenced %s:\n", type
);
139 gen_disasm_disassemble(ctx
->disasm
, bo
.map
, 0, ctx
->fp
);
/* Heuristic to determine whether a uint32_t is probably actually a float
 * (http://stackoverflow.com/a/2953466)
 */
static bool
probably_float(uint32_t bits)
{
   /* Unbiased IEEE-754 single-precision exponent and mantissa. */
   int exp = ((bits & 0x7f800000U) >> 23) - 127;
   uint32_t mant = bits & 0x007fffff;

   /* +- 0.0 */
   if (exp == -127 && mant == 0)
      return true;

   /* +- 1 billionth to 1 billion */
   if (-30 <= exp && exp <= 30)
      return true;

   /* some value with only a few binary digits */
   if ((mant & 0x0000ffff) == 0)
      return true;

   return false;
}
168 ctx_print_buffer(struct gen_batch_decode_ctx
*ctx
,
169 struct gen_batch_decode_bo bo
,
170 uint32_t read_length
,
174 const uint32_t *dw_end
=
175 bo
.map
+ ROUND_DOWN_TO(MIN2(bo
.size
, read_length
), 4);
177 int column_count
= 0, line_count
= -1;
178 for (const uint32_t *dw
= bo
.map
; dw
< dw_end
; dw
++) {
179 if (column_count
* 4 == pitch
|| column_count
== 8) {
180 fprintf(ctx
->fp
, "\n");
184 if (max_lines
>= 0 && line_count
>= max_lines
)
187 fprintf(ctx
->fp
, column_count
== 0 ? " " : " ");
189 if ((ctx
->flags
& GEN_BATCH_DECODE_FLOATS
) && probably_float(*dw
))
190 fprintf(ctx
->fp
, " %8.2f", *(float *) dw
);
192 fprintf(ctx
->fp
, " 0x%08x", *dw
);
196 fprintf(ctx
->fp
, "\n");
199 static struct gen_group
*
200 gen_ctx_find_instruction(struct gen_batch_decode_ctx
*ctx
, const uint32_t *p
)
202 return gen_spec_find_instruction(ctx
->spec
, ctx
->engine
, p
);
206 handle_state_base_address(struct gen_batch_decode_ctx
*ctx
, const uint32_t *p
)
208 struct gen_group
*inst
= gen_ctx_find_instruction(ctx
, p
);
210 struct gen_field_iterator iter
;
211 gen_field_iterator_init(&iter
, inst
, p
, 0, false);
213 uint64_t surface_base
= 0, dynamic_base
= 0, instruction_base
= 0;
214 bool surface_modify
= 0, dynamic_modify
= 0, instruction_modify
= 0;
216 while (gen_field_iterator_next(&iter
)) {
217 if (strcmp(iter
.name
, "Surface State Base Address") == 0) {
218 surface_base
= iter
.raw_value
;
219 } else if (strcmp(iter
.name
, "Dynamic State Base Address") == 0) {
220 dynamic_base
= iter
.raw_value
;
221 } else if (strcmp(iter
.name
, "Instruction Base Address") == 0) {
222 instruction_base
= iter
.raw_value
;
223 } else if (strcmp(iter
.name
, "Surface State Base Address Modify Enable") == 0) {
224 surface_modify
= iter
.raw_value
;
225 } else if (strcmp(iter
.name
, "Dynamic State Base Address Modify Enable") == 0) {
226 dynamic_modify
= iter
.raw_value
;
227 } else if (strcmp(iter
.name
, "Instruction Base Address Modify Enable") == 0) {
228 instruction_modify
= iter
.raw_value
;
233 ctx
->dynamic_base
= dynamic_base
;
236 ctx
->surface_base
= surface_base
;
238 if (instruction_modify
)
239 ctx
->instruction_base
= instruction_base
;
243 dump_binding_table(struct gen_batch_decode_ctx
*ctx
, uint32_t offset
, int count
)
245 struct gen_group
*strct
=
246 gen_spec_find_struct(ctx
->spec
, "RENDER_SURFACE_STATE");
248 fprintf(ctx
->fp
, "did not find RENDER_SURFACE_STATE info\n");
253 count
= update_count(ctx
, offset
, 1, 8);
255 if (offset
% 32 != 0 || offset
>= UINT16_MAX
) {
256 fprintf(ctx
->fp
, " invalid binding table pointer\n");
260 struct gen_batch_decode_bo bind_bo
=
261 ctx_get_bo(ctx
, true, ctx
->surface_base
+ offset
);
263 if (bind_bo
.map
== NULL
) {
264 fprintf(ctx
->fp
, " binding table unavailable\n");
268 const uint32_t *pointers
= bind_bo
.map
;
269 for (int i
= 0; i
< count
; i
++) {
270 if (pointers
[i
] == 0)
273 uint64_t addr
= ctx
->surface_base
+ pointers
[i
];
274 struct gen_batch_decode_bo bo
= ctx_get_bo(ctx
, true, addr
);
275 uint32_t size
= strct
->dw_length
* 4;
277 if (pointers
[i
] % 32 != 0 ||
278 addr
< bo
.addr
|| addr
+ size
>= bo
.addr
+ bo
.size
) {
279 fprintf(ctx
->fp
, "pointer %u: 0x%08x <not valid>\n", i
, pointers
[i
]);
283 fprintf(ctx
->fp
, "pointer %u: 0x%08x\n", i
, pointers
[i
]);
284 ctx_print_group(ctx
, strct
, addr
, bo
.map
+ (addr
- bo
.addr
));
289 dump_samplers(struct gen_batch_decode_ctx
*ctx
, uint32_t offset
, int count
)
291 struct gen_group
*strct
= gen_spec_find_struct(ctx
->spec
, "SAMPLER_STATE");
294 count
= update_count(ctx
, offset
, strct
->dw_length
, 4);
296 uint64_t state_addr
= ctx
->dynamic_base
+ offset
;
297 struct gen_batch_decode_bo bo
= ctx_get_bo(ctx
, true, state_addr
);
298 const void *state_map
= bo
.map
;
300 if (state_map
== NULL
) {
301 fprintf(ctx
->fp
, " samplers unavailable\n");
305 if (offset
% 32 != 0 || state_addr
- bo
.addr
>= bo
.size
) {
306 fprintf(ctx
->fp
, " invalid sampler state pointer\n");
310 for (int i
= 0; i
< count
; i
++) {
311 fprintf(ctx
->fp
, "sampler state %d\n", i
);
312 ctx_print_group(ctx
, strct
, state_addr
, state_map
);
319 handle_media_interface_descriptor_load(struct gen_batch_decode_ctx
*ctx
,
322 struct gen_group
*inst
= gen_ctx_find_instruction(ctx
, p
);
323 struct gen_group
*desc
=
324 gen_spec_find_struct(ctx
->spec
, "INTERFACE_DESCRIPTOR_DATA");
326 struct gen_field_iterator iter
;
327 gen_field_iterator_init(&iter
, inst
, p
, 0, false);
328 uint32_t descriptor_offset
= 0;
329 int descriptor_count
= 0;
330 while (gen_field_iterator_next(&iter
)) {
331 if (strcmp(iter
.name
, "Interface Descriptor Data Start Address") == 0) {
332 descriptor_offset
= strtol(iter
.value
, NULL
, 16);
333 } else if (strcmp(iter
.name
, "Interface Descriptor Total Length") == 0) {
335 strtol(iter
.value
, NULL
, 16) / (desc
->dw_length
* 4);
339 uint64_t desc_addr
= ctx
->dynamic_base
+ descriptor_offset
;
340 struct gen_batch_decode_bo bo
= ctx_get_bo(ctx
, true, desc_addr
);
341 const void *desc_map
= bo
.map
;
343 if (desc_map
== NULL
) {
344 fprintf(ctx
->fp
, " interface descriptors unavailable\n");
348 for (int i
= 0; i
< descriptor_count
; i
++) {
349 fprintf(ctx
->fp
, "descriptor %d: %08x\n", i
, descriptor_offset
);
351 ctx_print_group(ctx
, desc
, desc_addr
, desc_map
);
353 gen_field_iterator_init(&iter
, desc
, desc_map
, 0, false);
355 uint32_t sampler_offset
= 0, sampler_count
= 0;
356 uint32_t binding_table_offset
= 0, binding_entry_count
= 0;
357 while (gen_field_iterator_next(&iter
)) {
358 if (strcmp(iter
.name
, "Kernel Start Pointer") == 0) {
359 ksp
= strtoll(iter
.value
, NULL
, 16);
360 } else if (strcmp(iter
.name
, "Sampler State Pointer") == 0) {
361 sampler_offset
= strtol(iter
.value
, NULL
, 16);
362 } else if (strcmp(iter
.name
, "Sampler Count") == 0) {
363 sampler_count
= strtol(iter
.value
, NULL
, 10);
364 } else if (strcmp(iter
.name
, "Binding Table Pointer") == 0) {
365 binding_table_offset
= strtol(iter
.value
, NULL
, 16);
366 } else if (strcmp(iter
.name
, "Binding Table Entry Count") == 0) {
367 binding_entry_count
= strtol(iter
.value
, NULL
, 10);
371 ctx_disassemble_program(ctx
, ksp
, "compute shader");
374 dump_samplers(ctx
, sampler_offset
, sampler_count
);
375 dump_binding_table(ctx
, binding_table_offset
, binding_entry_count
);
377 desc_map
+= desc
->dw_length
;
378 desc_addr
+= desc
->dw_length
* 4;
383 handle_3dstate_vertex_buffers(struct gen_batch_decode_ctx
*ctx
,
386 struct gen_group
*inst
= gen_ctx_find_instruction(ctx
, p
);
387 struct gen_group
*vbs
= gen_spec_find_struct(ctx
->spec
, "VERTEX_BUFFER_STATE");
389 struct gen_batch_decode_bo vb
= {};
390 uint32_t vb_size
= 0;
395 struct gen_field_iterator iter
;
396 gen_field_iterator_init(&iter
, inst
, p
, 0, false);
397 while (gen_field_iterator_next(&iter
)) {
398 if (iter
.struct_desc
!= vbs
)
401 struct gen_field_iterator vbs_iter
;
402 gen_field_iterator_init(&vbs_iter
, vbs
, &iter
.p
[iter
.start_bit
/ 32], 0, false);
403 while (gen_field_iterator_next(&vbs_iter
)) {
404 if (strcmp(vbs_iter
.name
, "Vertex Buffer Index") == 0) {
405 index
= vbs_iter
.raw_value
;
406 } else if (strcmp(vbs_iter
.name
, "Buffer Pitch") == 0) {
407 pitch
= vbs_iter
.raw_value
;
408 } else if (strcmp(vbs_iter
.name
, "Buffer Starting Address") == 0) {
409 vb
= ctx_get_bo(ctx
, true, vbs_iter
.raw_value
);
410 } else if (strcmp(vbs_iter
.name
, "Buffer Size") == 0) {
411 vb_size
= vbs_iter
.raw_value
;
413 } else if (strcmp(vbs_iter
.name
, "End Address") == 0) {
414 if (vb
.map
&& vbs_iter
.raw_value
>= vb
.addr
)
415 vb_size
= (vbs_iter
.raw_value
+ 1) - vb
.addr
;
424 fprintf(ctx
->fp
, "vertex buffer %d, size %d\n", index
, vb_size
);
426 if (vb
.map
== NULL
) {
427 fprintf(ctx
->fp
, " buffer contents unavailable\n");
431 if (vb
.map
== 0 || vb_size
== 0)
434 ctx_print_buffer(ctx
, vb
, vb_size
, pitch
, ctx
->max_vbo_decoded_lines
);
446 handle_3dstate_index_buffer(struct gen_batch_decode_ctx
*ctx
,
449 struct gen_group
*inst
= gen_ctx_find_instruction(ctx
, p
);
451 struct gen_batch_decode_bo ib
= {};
452 uint32_t ib_size
= 0;
455 struct gen_field_iterator iter
;
456 gen_field_iterator_init(&iter
, inst
, p
, 0, false);
457 while (gen_field_iterator_next(&iter
)) {
458 if (strcmp(iter
.name
, "Index Format") == 0) {
459 format
= iter
.raw_value
;
460 } else if (strcmp(iter
.name
, "Buffer Starting Address") == 0) {
461 ib
= ctx_get_bo(ctx
, true, iter
.raw_value
);
462 } else if (strcmp(iter
.name
, "Buffer Size") == 0) {
463 ib_size
= iter
.raw_value
;
467 if (ib
.map
== NULL
) {
468 fprintf(ctx
->fp
, " buffer contents unavailable\n");
472 const void *m
= ib
.map
;
473 const void *ib_end
= ib
.map
+ MIN2(ib
.size
, ib_size
);
474 for (int i
= 0; m
< ib_end
&& i
< 10; i
++) {
477 fprintf(ctx
->fp
, "%3d ", *(uint8_t *)m
);
481 fprintf(ctx
->fp
, "%3d ", *(uint16_t *)m
);
485 fprintf(ctx
->fp
, "%3d ", *(uint32_t *)m
);
492 fprintf(ctx
->fp
, "...");
493 fprintf(ctx
->fp
, "\n");
497 decode_single_ksp(struct gen_batch_decode_ctx
*ctx
, const uint32_t *p
)
499 struct gen_group
*inst
= gen_ctx_find_instruction(ctx
, p
);
502 bool is_simd8
= false; /* vertex shaders on Gen8+ only */
503 bool is_enabled
= true;
505 struct gen_field_iterator iter
;
506 gen_field_iterator_init(&iter
, inst
, p
, 0, false);
507 while (gen_field_iterator_next(&iter
)) {
508 if (strcmp(iter
.name
, "Kernel Start Pointer") == 0) {
509 ksp
= iter
.raw_value
;
510 } else if (strcmp(iter
.name
, "SIMD8 Dispatch Enable") == 0) {
511 is_simd8
= iter
.raw_value
;
512 } else if (strcmp(iter
.name
, "Dispatch Mode") == 0) {
513 is_simd8
= strcmp(iter
.value
, "SIMD8") == 0;
514 } else if (strcmp(iter
.name
, "Dispatch Enable") == 0) {
515 is_simd8
= strcmp(iter
.value
, "SIMD8") == 0;
516 } else if (strcmp(iter
.name
, "Enable") == 0) {
517 is_enabled
= iter
.raw_value
;
522 strcmp(inst
->name
, "VS_STATE") == 0 ? "vertex shader" :
523 strcmp(inst
->name
, "GS_STATE") == 0 ? "geometry shader" :
524 strcmp(inst
->name
, "SF_STATE") == 0 ? "strips and fans shader" :
525 strcmp(inst
->name
, "CLIP_STATE") == 0 ? "clip shader" :
526 strcmp(inst
->name
, "3DSTATE_DS") == 0 ? "tessellation evaluation shader" :
527 strcmp(inst
->name
, "3DSTATE_HS") == 0 ? "tessellation control shader" :
528 strcmp(inst
->name
, "3DSTATE_VS") == 0 ? (is_simd8
? "SIMD8 vertex shader" : "vec4 vertex shader") :
529 strcmp(inst
->name
, "3DSTATE_GS") == 0 ? (is_simd8
? "SIMD8 geometry shader" : "vec4 geometry shader") :
533 ctx_disassemble_program(ctx
, ksp
, type
);
539 decode_ps_kernels(struct gen_batch_decode_ctx
*ctx
, const uint32_t *p
)
541 struct gen_group
*inst
= gen_ctx_find_instruction(ctx
, p
);
543 uint64_t ksp
[3] = {0, 0, 0};
544 bool enabled
[3] = {false, false, false};
546 struct gen_field_iterator iter
;
547 gen_field_iterator_init(&iter
, inst
, p
, 0, false);
548 while (gen_field_iterator_next(&iter
)) {
549 if (strncmp(iter
.name
, "Kernel Start Pointer ",
550 strlen("Kernel Start Pointer ")) == 0) {
551 int idx
= iter
.name
[strlen("Kernel Start Pointer ")] - '0';
552 ksp
[idx
] = strtol(iter
.value
, NULL
, 16);
553 } else if (strcmp(iter
.name
, "8 Pixel Dispatch Enable") == 0) {
554 enabled
[0] = strcmp(iter
.value
, "true") == 0;
555 } else if (strcmp(iter
.name
, "16 Pixel Dispatch Enable") == 0) {
556 enabled
[1] = strcmp(iter
.value
, "true") == 0;
557 } else if (strcmp(iter
.name
, "32 Pixel Dispatch Enable") == 0) {
558 enabled
[2] = strcmp(iter
.value
, "true") == 0;
562 /* Reorder KSPs to be [8, 16, 32] instead of the hardware order. */
563 if (enabled
[0] + enabled
[1] + enabled
[2] == 1) {
567 } else if (enabled
[2]) {
572 uint64_t tmp
= ksp
[1];
578 ctx_disassemble_program(ctx
, ksp
[0], "SIMD8 fragment shader");
580 ctx_disassemble_program(ctx
, ksp
[1], "SIMD16 fragment shader");
582 ctx_disassemble_program(ctx
, ksp
[2], "SIMD32 fragment shader");
583 fprintf(ctx
->fp
, "\n");
587 decode_3dstate_constant(struct gen_batch_decode_ctx
*ctx
, const uint32_t *p
)
589 struct gen_group
*inst
= gen_ctx_find_instruction(ctx
, p
);
590 struct gen_group
*body
=
591 gen_spec_find_struct(ctx
->spec
, "3DSTATE_CONSTANT_BODY");
593 uint32_t read_length
[4] = {0};
594 uint64_t read_addr
[4];
596 struct gen_field_iterator outer
;
597 gen_field_iterator_init(&outer
, inst
, p
, 0, false);
598 while (gen_field_iterator_next(&outer
)) {
599 if (outer
.struct_desc
!= body
)
602 struct gen_field_iterator iter
;
603 gen_field_iterator_init(&iter
, body
, &outer
.p
[outer
.start_bit
/ 32],
606 while (gen_field_iterator_next(&iter
)) {
608 if (sscanf(iter
.name
, "Read Length[%d]", &idx
) == 1) {
609 read_length
[idx
] = iter
.raw_value
;
610 } else if (sscanf(iter
.name
, "Buffer[%d]", &idx
) == 1) {
611 read_addr
[idx
] = iter
.raw_value
;
615 for (int i
= 0; i
< 4; i
++) {
616 if (read_length
[i
] == 0)
619 struct gen_batch_decode_bo buffer
= ctx_get_bo(ctx
, true, read_addr
[i
]);
621 fprintf(ctx
->fp
, "constant buffer %d unavailable\n", i
);
625 unsigned size
= read_length
[i
] * 32;
626 fprintf(ctx
->fp
, "constant buffer %d, size %u\n", i
, size
);
628 ctx_print_buffer(ctx
, buffer
, size
, 0, -1);
/* 3DSTATE_BINDING_TABLE_POINTERS_*: dword 1 is the table offset; size unknown. */
static void
decode_3dstate_binding_table_pointers(struct gen_batch_decode_ctx *ctx,
                                      const uint32_t *p)
{
   dump_binding_table(ctx, p[1], -1);
}
/* 3DSTATE_SAMPLER_STATE_POINTERS_*: dword 1 is the sampler offset; count unknown. */
static void
decode_3dstate_sampler_state_pointers(struct gen_batch_decode_ctx *ctx,
                                      const uint32_t *p)
{
   dump_samplers(ctx, p[1], -1);
}
/* Gen6 variant carries three sampler offsets (VS, GS, PS) in dwords 1-3. */
static void
decode_3dstate_sampler_state_pointers_gen6(struct gen_batch_decode_ctx *ctx,
                                           const uint32_t *p)
{
   dump_samplers(ctx, p[1], -1);
   dump_samplers(ctx, p[2], -1);
   dump_samplers(ctx, p[3], -1);
}
/* Return true if `str` ends with the suffix `end`. */
static bool
str_ends_with(const char *str, const char *end)
{
   int offset = strlen(str) - strlen(end);
   /* A suffix longer than the string can never match. */
   if (offset < 0)
      return false;

   return strcmp(str + offset, end) == 0;
}
667 decode_dynamic_state_pointers(struct gen_batch_decode_ctx
*ctx
,
668 const char *struct_type
, const uint32_t *p
,
671 struct gen_group
*inst
= gen_ctx_find_instruction(ctx
, p
);
673 uint32_t state_offset
= 0;
675 struct gen_field_iterator iter
;
676 gen_field_iterator_init(&iter
, inst
, p
, 0, false);
677 while (gen_field_iterator_next(&iter
)) {
678 if (str_ends_with(iter
.name
, "Pointer")) {
679 state_offset
= iter
.raw_value
;
684 uint64_t state_addr
= ctx
->dynamic_base
+ state_offset
;
685 struct gen_batch_decode_bo bo
= ctx_get_bo(ctx
, true, state_addr
);
686 const void *state_map
= bo
.map
;
688 if (state_map
== NULL
) {
689 fprintf(ctx
->fp
, " dynamic %s state unavailable\n", struct_type
);
693 struct gen_group
*state
= gen_spec_find_struct(ctx
->spec
, struct_type
);
694 if (strcmp(struct_type
, "BLEND_STATE") == 0) {
695 /* Blend states are different from the others because they have a header
696 * struct called BLEND_STATE which is followed by a variable number of
697 * BLEND_STATE_ENTRY structs.
699 fprintf(ctx
->fp
, "%s\n", struct_type
);
700 ctx_print_group(ctx
, state
, state_addr
, state_map
);
702 state_addr
+= state
->dw_length
* 4;
703 state_map
+= state
->dw_length
* 4;
705 struct_type
= "BLEND_STATE_ENTRY";
706 state
= gen_spec_find_struct(ctx
->spec
, struct_type
);
709 for (int i
= 0; i
< count
; i
++) {
710 fprintf(ctx
->fp
, "%s %d\n", struct_type
, i
);
711 ctx_print_group(ctx
, state
, state_addr
, state_map
);
713 state_addr
+= state
->dw_length
* 4;
714 state_map
+= state
->dw_length
* 4;
/* CC viewport pointer packet: up to 4 CC_VIEWPORT structs. */
static void
decode_3dstate_viewport_state_pointers_cc(struct gen_batch_decode_ctx *ctx,
                                          const uint32_t *p)
{
   decode_dynamic_state_pointers(ctx, "CC_VIEWPORT", p, 4);
}
/* SF_CLIP viewport pointer packet: up to 4 SF_CLIP_VIEWPORT structs. */
static void
decode_3dstate_viewport_state_pointers_sf_clip(struct gen_batch_decode_ctx *ctx,
                                               const uint32_t *p)
{
   decode_dynamic_state_pointers(ctx, "SF_CLIP_VIEWPORT", p, 4);
}
/* Blend state pointer packet: one BLEND_STATE (plus its entries). */
static void
decode_3dstate_blend_state_pointers(struct gen_batch_decode_ctx *ctx,
                                    const uint32_t *p)
{
   decode_dynamic_state_pointers(ctx, "BLEND_STATE", p, 1);
}
/* Color-calc state pointer packet: one COLOR_CALC_STATE. */
static void
decode_3dstate_cc_state_pointers(struct gen_batch_decode_ctx *ctx,
                                 const uint32_t *p)
{
   decode_dynamic_state_pointers(ctx, "COLOR_CALC_STATE", p, 1);
}
/* Scissor state pointer packet: one SCISSOR_RECT. */
static void
decode_3dstate_scissor_state_pointers(struct gen_batch_decode_ctx *ctx,
                                      const uint32_t *p)
{
   decode_dynamic_state_pointers(ctx, "SCISSOR_RECT", p, 1);
}
754 decode_load_register_imm(struct gen_batch_decode_ctx
*ctx
, const uint32_t *p
)
756 struct gen_group
*reg
= gen_spec_find_register(ctx
->spec
, p
[1]);
759 fprintf(ctx
->fp
, "register %s (0x%x): 0x%x\n",
760 reg
->name
, reg
->register_offset
, p
[2]);
761 ctx_print_group(ctx
, reg
, reg
->register_offset
, &p
[2]);
765 struct custom_decoder
{
766 const char *cmd_name
;
767 void (*decode
)(struct gen_batch_decode_ctx
*ctx
, const uint32_t *p
);
768 } custom_decoders
[] = {
769 { "STATE_BASE_ADDRESS", handle_state_base_address
},
770 { "MEDIA_INTERFACE_DESCRIPTOR_LOAD", handle_media_interface_descriptor_load
},
771 { "3DSTATE_VERTEX_BUFFERS", handle_3dstate_vertex_buffers
},
772 { "3DSTATE_INDEX_BUFFER", handle_3dstate_index_buffer
},
773 { "3DSTATE_VS", decode_single_ksp
},
774 { "3DSTATE_GS", decode_single_ksp
},
775 { "3DSTATE_DS", decode_single_ksp
},
776 { "3DSTATE_HS", decode_single_ksp
},
777 { "3DSTATE_PS", decode_ps_kernels
},
778 { "3DSTATE_CONSTANT_VS", decode_3dstate_constant
},
779 { "3DSTATE_CONSTANT_GS", decode_3dstate_constant
},
780 { "3DSTATE_CONSTANT_PS", decode_3dstate_constant
},
781 { "3DSTATE_CONSTANT_HS", decode_3dstate_constant
},
782 { "3DSTATE_CONSTANT_DS", decode_3dstate_constant
},
784 { "3DSTATE_BINDING_TABLE_POINTERS_VS", decode_3dstate_binding_table_pointers
},
785 { "3DSTATE_BINDING_TABLE_POINTERS_HS", decode_3dstate_binding_table_pointers
},
786 { "3DSTATE_BINDING_TABLE_POINTERS_DS", decode_3dstate_binding_table_pointers
},
787 { "3DSTATE_BINDING_TABLE_POINTERS_GS", decode_3dstate_binding_table_pointers
},
788 { "3DSTATE_BINDING_TABLE_POINTERS_PS", decode_3dstate_binding_table_pointers
},
790 { "3DSTATE_SAMPLER_STATE_POINTERS_VS", decode_3dstate_sampler_state_pointers
},
791 { "3DSTATE_SAMPLER_STATE_POINTERS_HS", decode_3dstate_sampler_state_pointers
},
792 { "3DSTATE_SAMPLER_STATE_POINTERS_DS", decode_3dstate_sampler_state_pointers
},
793 { "3DSTATE_SAMPLER_STATE_POINTERS_GS", decode_3dstate_sampler_state_pointers
},
794 { "3DSTATE_SAMPLER_STATE_POINTERS_PS", decode_3dstate_sampler_state_pointers
},
795 { "3DSTATE_SAMPLER_STATE_POINTERS", decode_3dstate_sampler_state_pointers_gen6
},
797 { "3DSTATE_VIEWPORT_STATE_POINTERS_CC", decode_3dstate_viewport_state_pointers_cc
},
798 { "3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP", decode_3dstate_viewport_state_pointers_sf_clip
},
799 { "3DSTATE_BLEND_STATE_POINTERS", decode_3dstate_blend_state_pointers
},
800 { "3DSTATE_CC_STATE_POINTERS", decode_3dstate_cc_state_pointers
},
801 { "3DSTATE_SCISSOR_STATE_POINTERS", decode_3dstate_scissor_state_pointers
},
802 { "MI_LOAD_REGISTER_IMM", decode_load_register_imm
}
806 gen_print_batch(struct gen_batch_decode_ctx
*ctx
,
807 const uint32_t *batch
, uint32_t batch_size
,
808 uint64_t batch_addr
, bool from_ring
)
810 const uint32_t *p
, *end
= batch
+ batch_size
/ sizeof(uint32_t);
812 struct gen_group
*inst
;
813 const char *reset_color
= ctx
->flags
& GEN_BATCH_DECODE_IN_COLOR
? NORMAL
: "";
815 if (ctx
->n_batch_buffer_start
>= 100) {
816 fprintf(ctx
->fp
, "%s0x%08"PRIx64
": Max batch buffer jumps exceeded%s\n",
817 (ctx
->flags
& GEN_BATCH_DECODE_IN_COLOR
) ? RED_COLOR
: "",
818 (ctx
->flags
& GEN_BATCH_DECODE_OFFSETS
) ? batch_addr
: 0,
823 ctx
->n_batch_buffer_start
++;
825 for (p
= batch
; p
< end
; p
+= length
) {
826 inst
= gen_ctx_find_instruction(ctx
, p
);
827 length
= gen_group_get_length(inst
, p
);
828 assert(inst
== NULL
|| length
> 0);
829 length
= MAX2(1, length
);
832 if (ctx
->flags
& GEN_BATCH_DECODE_OFFSETS
)
833 offset
= batch_addr
+ ((char *)p
- (char *)batch
);
838 fprintf(ctx
->fp
, "%s0x%08"PRIx64
": unknown instruction %08x%s\n",
839 (ctx
->flags
& GEN_BATCH_DECODE_IN_COLOR
) ? RED_COLOR
: "",
840 offset
, p
[0], reset_color
);
845 const char *inst_name
= gen_group_get_name(inst
);
846 if (ctx
->flags
& GEN_BATCH_DECODE_IN_COLOR
) {
847 reset_color
= NORMAL
;
848 if (ctx
->flags
& GEN_BATCH_DECODE_FULL
) {
849 if (strcmp(inst_name
, "MI_BATCH_BUFFER_START") == 0 ||
850 strcmp(inst_name
, "MI_BATCH_BUFFER_END") == 0)
851 color
= GREEN_HEADER
;
862 fprintf(ctx
->fp
, "%s0x%08"PRIx64
": 0x%08x: %-80s%s\n",
863 color
, offset
, p
[0], inst_name
, reset_color
);
865 if (ctx
->flags
& GEN_BATCH_DECODE_FULL
) {
866 ctx_print_group(ctx
, inst
, offset
, p
);
868 for (int i
= 0; i
< ARRAY_SIZE(custom_decoders
); i
++) {
869 if (strcmp(inst_name
, custom_decoders
[i
].cmd_name
) == 0) {
870 custom_decoders
[i
].decode(ctx
, p
);
876 if (strcmp(inst_name
, "MI_BATCH_BUFFER_START") == 0) {
877 uint64_t next_batch_addr
= 0;
879 bool second_level
= false;
880 struct gen_field_iterator iter
;
881 gen_field_iterator_init(&iter
, inst
, p
, 0, false);
882 while (gen_field_iterator_next(&iter
)) {
883 if (strcmp(iter
.name
, "Batch Buffer Start Address") == 0) {
884 next_batch_addr
= iter
.raw_value
;
885 } else if (strcmp(iter
.name
, "Second Level Batch Buffer") == 0) {
886 second_level
= iter
.raw_value
;
887 } else if (strcmp(iter
.name
, "Address Space Indicator") == 0) {
888 ppgtt
= iter
.raw_value
;
892 struct gen_batch_decode_bo next_batch
= ctx_get_bo(ctx
, ppgtt
, next_batch_addr
);
894 if (next_batch
.map
== NULL
) {
895 fprintf(ctx
->fp
, "Secondary batch at 0x%08"PRIx64
" unavailable\n",
898 gen_print_batch(ctx
, next_batch
.map
, next_batch
.size
,
899 next_batch
.addr
, false);
902 /* MI_BATCH_BUFFER_START with "2nd Level Batch Buffer" set acts
903 * like a subroutine call. Commands that come afterwards get
904 * processed once the 2nd level batch buffer returns with
905 * MI_BATCH_BUFFER_END.
908 } else if (!from_ring
) {
909 /* MI_BATCH_BUFFER_START with "2nd Level Batch Buffer" unset acts
910 * like a goto. Nothing after it will ever get processed. In
911 * order to prevent the recursion from growing, we just reset the
916 } else if (strcmp(inst_name
, "MI_BATCH_BUFFER_END") == 0) {
921 ctx
->n_batch_buffer_start
--;