intel/batch_decoder: Print blend states properly
[mesa.git] / src/intel/common/gen_batch_decoder.c
1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "common/gen_decoder.h"
25 #include "gen_disasm.h"
26 #include "util/macros.h"
27
28 #include <string.h>
29
30 void
31 gen_batch_decode_ctx_init(struct gen_batch_decode_ctx *ctx,
32 const struct gen_device_info *devinfo,
33 FILE *fp, enum gen_batch_decode_flags flags,
34 const char *xml_path,
35 struct gen_batch_decode_bo (*get_bo)(void *,
36 uint64_t),
37 unsigned (*get_state_size)(void *, uint32_t),
38 void *user_data)
39 {
40 memset(ctx, 0, sizeof(*ctx));
41
42 ctx->get_bo = get_bo;
43 ctx->get_state_size = get_state_size;
44 ctx->user_data = user_data;
45 ctx->fp = fp;
46 ctx->flags = flags;
47 ctx->max_vbo_decoded_lines = -1; /* No limit! */
48
49 if (xml_path == NULL)
50 ctx->spec = gen_spec_load(devinfo);
51 else
52 ctx->spec = gen_spec_load_from_path(devinfo, xml_path);
53 ctx->disasm = gen_disasm_create(devinfo);
54 }
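/* Typical usage, as a minimal sketch. The buffer-lookup callbacks come from
 * the caller, so "my_get_bo", "my_get_state_size" and "driver" below are
 * hypothetical names used only for illustration. An xml_path of NULL selects
 * the default genxml data for the device, and get_state_size may be NULL
 * (update_count() then falls back to guessing):
 *
 *    struct gen_batch_decode_ctx ctx;
 *    gen_batch_decode_ctx_init(&ctx, &devinfo, stderr,
 *                              GEN_BATCH_DECODE_FULL |
 *                              GEN_BATCH_DECODE_OFFSETS,
 *                              NULL, my_get_bo, my_get_state_size, driver);
 *    gen_print_batch(&ctx, batch_map, batch_size, batch_addr);
 *    gen_batch_decode_ctx_finish(&ctx);
 */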
55
56 void
57 gen_batch_decode_ctx_finish(struct gen_batch_decode_ctx *ctx)
58 {
59 gen_spec_destroy(ctx->spec);
60 gen_disasm_destroy(ctx->disasm);
61 }
62
63 #define CSI "\e["
64 #define RED_COLOR CSI "31m"
65 #define BLUE_HEADER CSI "0;44m"
66 #define GREEN_HEADER CSI "1;42m"
67 #define NORMAL CSI "0m"
68
69 static void
70 ctx_print_group(struct gen_batch_decode_ctx *ctx,
71 struct gen_group *group,
72 uint64_t address, const void *map)
73 {
74 gen_print_group(ctx->fp, group, address, map, 0,
75 (ctx->flags & GEN_BATCH_DECODE_IN_COLOR) != 0);
76 }
77
78 static struct gen_batch_decode_bo
79 ctx_get_bo(struct gen_batch_decode_ctx *ctx, uint64_t addr)
80 {
81 if (gen_spec_get_gen(ctx->spec) >= gen_make_gen(8,0)) {
82 /* On Broadwell and above, we have 48-bit addresses which consume two
83 * dwords. Some packets require that these get stored in a "canonical
84 * form" which means that bit 47 is sign-extended through the upper
85 * bits. In order to correctly handle those aub dumps, we need to mask
86 * off the top 16 bits.
87 */
88 addr &= (~0ull >> 16);
89 }
90
91 struct gen_batch_decode_bo bo = ctx->get_bo(ctx->user_data, addr);
92
93 if (gen_spec_get_gen(ctx->spec) >= gen_make_gen(8,0))
94 bo.addr &= (~0ull >> 16);
95
96 /* We may actually have an offset into the bo */
97 if (bo.map != NULL) {
98 assert(bo.addr <= addr);
99 uint64_t offset = addr - bo.addr;
100 bo.map += offset;
101 bo.addr += offset;
102 bo.size -= offset;
103 }
104
105 return bo;
106 }
107
108 static int
109 update_count(struct gen_batch_decode_ctx *ctx,
110 uint32_t offset_from_dsba,
111 unsigned element_dwords,
112 unsigned guess)
113 {
114 unsigned size = 0;
115
116 if (ctx->get_state_size)
117 size = ctx->get_state_size(ctx->user_data, offset_from_dsba);
118
119 if (size > 0)
120 return size / (sizeof(uint32_t) * element_dwords);
121
122 /* In the absence of any information, just guess arbitrarily. */
123 return guess;
124 }
125
126 static void
127 ctx_disassemble_program(struct gen_batch_decode_ctx *ctx,
128 uint32_t ksp, const char *type)
129 {
130 uint64_t addr = ctx->instruction_base + ksp;
131 struct gen_batch_decode_bo bo = ctx_get_bo(ctx, addr);
132 if (!bo.map)
133 return;
134
135 fprintf(ctx->fp, "\nReferenced %s:\n", type);
136 gen_disasm_disassemble(ctx->disasm, bo.map, 0, ctx->fp);
137 }
138
139 /* Heuristic to determine whether a uint32_t is probably actually a float
140 * (http://stackoverflow.com/a/2953466)
141 */
142
143 static bool
144 probably_float(uint32_t bits)
145 {
146 int exp = ((bits & 0x7f800000U) >> 23) - 127;
147 uint32_t mant = bits & 0x007fffff;
148
149 /* +- 0.0 */
150 if (exp == -127 && mant == 0)
151 return true;
152
153 /* +- 1 billionth to 1 billion */
154 if (-30 <= exp && exp <= 30)
155 return true;
156
157 /* some value with only a few binary digits */
158 if ((mant & 0x0000ffff) == 0)
159 return true;
160
161 return false;
162 }
163
164 static void
165 ctx_print_buffer(struct gen_batch_decode_ctx *ctx,
166 struct gen_batch_decode_bo bo,
167 uint32_t read_length,
168 uint32_t pitch,
169 int max_lines)
170 {
171 const uint32_t *dw_end = bo.map + MIN2(bo.size, read_length);
172
173 int column_count = 0, line_count = -1;
174 for (const uint32_t *dw = bo.map; dw < dw_end; dw++) {
175 if (column_count * 4 == pitch || column_count == 8) {
176 fprintf(ctx->fp, "\n");
177 column_count = 0;
178 line_count++;
179
180 if (max_lines >= 0 && line_count >= max_lines)
181 break;
182 }
183 fprintf(ctx->fp, column_count == 0 ? "  " : " ");
184
185 if ((ctx->flags & GEN_BATCH_DECODE_FLOATS) && probably_float(*dw))
186 fprintf(ctx->fp, " %8.2f", *(float *) dw);
187 else
188 fprintf(ctx->fp, " 0x%08x", *dw);
189
190 column_count++;
191 }
192 fprintf(ctx->fp, "\n");
193 }
194
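/* STATE_BASE_ADDRESS establishes the bases that later packets' pointers are
 * relative to. Stash the surface, dynamic and instruction state bases so
 * binding tables, samplers, dynamic state and kernel start pointers can be
 * resolved when those packets are decoded.
 */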
195 static void
196 handle_state_base_address(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
197 {
198 struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
199
200 struct gen_field_iterator iter;
201 gen_field_iterator_init(&iter, inst, p, 0, false);
202
203 while (gen_field_iterator_next(&iter)) {
204 if (strcmp(iter.name, "Surface State Base Address") == 0) {
205 ctx->surface_base = iter.raw_value;
206 } else if (strcmp(iter.name, "Dynamic State Base Address") == 0) {
207 ctx->dynamic_base = iter.raw_value;
208 } else if (strcmp(iter.name, "Instruction Base Address") == 0) {
209 ctx->instruction_base = iter.raw_value;
210 }
211 }
212 }
213
214 static void
215 dump_binding_table(struct gen_batch_decode_ctx *ctx, uint32_t offset, int count)
216 {
217 struct gen_group *strct =
218 gen_spec_find_struct(ctx->spec, "RENDER_SURFACE_STATE");
219 if (strct == NULL) {
220 fprintf(ctx->fp, "did not find RENDER_SURFACE_STATE info\n");
221 return;
222 }
223
224 if (count < 0)
225 count = update_count(ctx, offset, 1, 8);
226
227 if (offset % 32 != 0 || offset >= UINT16_MAX) {
228 fprintf(ctx->fp, " invalid binding table pointer\n");
229 return;
230 }
231
232 struct gen_batch_decode_bo bind_bo =
233 ctx_get_bo(ctx, ctx->surface_base + offset);
234
235 if (bind_bo.map == NULL) {
236 fprintf(ctx->fp, " binding table unavailable\n");
237 return;
238 }
239
240 const uint32_t *pointers = bind_bo.map;
241 for (int i = 0; i < count; i++) {
242 if (pointers[i] == 0)
243 continue;
244
245 uint64_t addr = ctx->surface_base + pointers[i];
246 struct gen_batch_decode_bo bo = ctx_get_bo(ctx, addr);
247 uint32_t size = strct->dw_length * 4;
248
249 if (pointers[i] % 32 != 0 ||
250 addr < bo.addr || addr + size >= bo.addr + bo.size) {
251 fprintf(ctx->fp, "pointer %u: %08x <not valid>\n", i, pointers[i]);
252 continue;
253 }
254
255 fprintf(ctx->fp, "pointer %u: %08x\n", i, pointers[i]);
256 ctx_print_group(ctx, strct, addr, bo.map + (addr - bo.addr));
257 }
258 }
259
260 static void
261 dump_samplers(struct gen_batch_decode_ctx *ctx, uint32_t offset, int count)
262 {
263 struct gen_group *strct = gen_spec_find_struct(ctx->spec, "SAMPLER_STATE");
264
265 if (count < 0)
266 count = update_count(ctx, offset, strct->dw_length, 4);
267
268 uint64_t state_addr = ctx->dynamic_base + offset;
269 struct gen_batch_decode_bo bo = ctx_get_bo(ctx, state_addr);
270 const void *state_map = bo.map;
271
272 if (state_map == NULL) {
273 fprintf(ctx->fp, " samplers unavailable\n");
274 return;
275 }
276
277 if (offset % 32 != 0 || state_addr - bo.addr >= bo.size) {
278 fprintf(ctx->fp, " invalid sampler state pointer\n");
279 return;
280 }
281
282 for (int i = 0; i < count; i++) {
283 fprintf(ctx->fp, "sampler state %d\n", i);
284 ctx_print_group(ctx, strct, state_addr, state_map);
285 state_addr += 16;
286 state_map += 16;
287 }
288 }
289
290 static void
291 handle_media_interface_descriptor_load(struct gen_batch_decode_ctx *ctx,
292 const uint32_t *p)
293 {
294 struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
295 struct gen_group *desc =
296 gen_spec_find_struct(ctx->spec, "INTERFACE_DESCRIPTOR_DATA");
297
298 struct gen_field_iterator iter;
299 gen_field_iterator_init(&iter, inst, p, 0, false);
300 uint32_t descriptor_offset = 0;
301 int descriptor_count = 0;
302 while (gen_field_iterator_next(&iter)) {
303 if (strcmp(iter.name, "Interface Descriptor Data Start Address") == 0) {
304 descriptor_offset = strtol(iter.value, NULL, 16);
305 } else if (strcmp(iter.name, "Interface Descriptor Total Length") == 0) {
306 descriptor_count =
307 strtol(iter.value, NULL, 16) / (desc->dw_length * 4);
308 }
309 }
310
311 uint64_t desc_addr = ctx->dynamic_base + descriptor_offset;
312 struct gen_batch_decode_bo bo = ctx_get_bo(ctx, desc_addr);
313 const void *desc_map = bo.map;
314
315 if (desc_map == NULL) {
316 fprintf(ctx->fp, " interface descriptors unavailable\n");
317 return;
318 }
319
320 for (int i = 0; i < descriptor_count; i++) {
321 fprintf(ctx->fp, "descriptor %d: %08x\n", i, descriptor_offset);
322
323 ctx_print_group(ctx, desc, desc_addr, desc_map);
324
325 gen_field_iterator_init(&iter, desc, desc_map, 0, false);
326 uint64_t ksp = 0;
327 uint32_t sampler_offset = 0, sampler_count = 0;
328 uint32_t binding_table_offset = 0, binding_entry_count = 0;
329 while (gen_field_iterator_next(&iter)) {
330 if (strcmp(iter.name, "Kernel Start Pointer") == 0) {
331 ksp = strtoll(iter.value, NULL, 16);
332 } else if (strcmp(iter.name, "Sampler State Pointer") == 0) {
333 sampler_offset = strtol(iter.value, NULL, 16);
334 } else if (strcmp(iter.name, "Sampler Count") == 0) {
335 sampler_count = strtol(iter.value, NULL, 10);
336 } else if (strcmp(iter.name, "Binding Table Pointer") == 0) {
337 binding_table_offset = strtol(iter.value, NULL, 16);
338 } else if (strcmp(iter.name, "Binding Table Entry Count") == 0) {
339 binding_entry_count = strtol(iter.value, NULL, 10);
340 }
341 }
342
343 ctx_disassemble_program(ctx, ksp, "compute shader");
344 fprintf(ctx->fp, "\n");
345
346 dump_samplers(ctx, sampler_offset, sampler_count);
347 dump_binding_table(ctx, binding_table_offset, binding_entry_count);
348
349 desc_map += desc->dw_length * 4;
350 desc_addr += desc->dw_length * 4;
351 }
352 }
353
354 static void
355 handle_3dstate_vertex_buffers(struct gen_batch_decode_ctx *ctx,
356 const uint32_t *p)
357 {
358 struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
359 struct gen_group *vbs = gen_spec_find_struct(ctx->spec, "VERTEX_BUFFER_STATE");
360
361 struct gen_batch_decode_bo vb = {};
362 uint32_t vb_size = 0;
363 int index = -1;
364 int pitch = -1;
365 bool ready = false;
366
367 struct gen_field_iterator iter;
368 gen_field_iterator_init(&iter, inst, p, 0, false);
369 while (gen_field_iterator_next(&iter)) {
370 if (iter.struct_desc != vbs)
371 continue;
372
373 struct gen_field_iterator vbs_iter;
374 gen_field_iterator_init(&vbs_iter, vbs, &iter.p[iter.start_bit / 32], 0, false);
375 while (gen_field_iterator_next(&vbs_iter)) {
376 if (strcmp(vbs_iter.name, "Vertex Buffer Index") == 0) {
377 index = vbs_iter.raw_value;
378 } else if (strcmp(vbs_iter.name, "Buffer Pitch") == 0) {
379 pitch = vbs_iter.raw_value;
380 } else if (strcmp(vbs_iter.name, "Buffer Starting Address") == 0) {
381 vb = ctx_get_bo(ctx, vbs_iter.raw_value);
382 } else if (strcmp(vbs_iter.name, "Buffer Size") == 0) {
383 vb_size = vbs_iter.raw_value;
384 ready = true;
385 } else if (strcmp(vbs_iter.name, "End Address") == 0) {
386 if (vb.map && vbs_iter.raw_value >= vb.addr)
387 vb_size = vbs_iter.raw_value - vb.addr;
388 else
389 vb_size = 0;
390 ready = true;
391 }
392
393 if (!ready)
394 continue;
395
396 fprintf(ctx->fp, "vertex buffer %d, size %d\n", index, vb_size);
397
398 if (vb.map == NULL) {
399 fprintf(ctx->fp, " buffer contents unavailable\n");
400 continue;
401 }
402
403 if (vb_size == 0)
404 continue;
405
406 ctx_print_buffer(ctx, vb, vb_size, pitch, ctx->max_vbo_decoded_lines);
407
408 vb.map = NULL;
409 vb_size = 0;
410 index = -1;
411 pitch = -1;
412 ready = false;
413 }
414 }
415 }
416
417 static void
418 handle_3dstate_index_buffer(struct gen_batch_decode_ctx *ctx,
419 const uint32_t *p)
420 {
421 struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
422
423 struct gen_batch_decode_bo ib = {};
424 uint32_t ib_size = 0;
425 uint32_t format = 0;
426
427 struct gen_field_iterator iter;
428 gen_field_iterator_init(&iter, inst, p, 0, false);
429 while (gen_field_iterator_next(&iter)) {
430 if (strcmp(iter.name, "Index Format") == 0) {
431 format = iter.raw_value;
432 } else if (strcmp(iter.name, "Buffer Starting Address") == 0) {
433 ib = ctx_get_bo(ctx, iter.raw_value);
434 } else if (strcmp(iter.name, "Buffer Size") == 0) {
435 ib_size = iter.raw_value;
436 }
437 }
438
439 if (ib.map == NULL) {
440 fprintf(ctx->fp, " buffer contents unavailable\n");
441 return;
442 }
443
444 const void *m = ib.map;
445 const void *ib_end = ib.map + MIN2(ib.size, ib_size);
446 for (int i = 0; m < ib_end && i < 10; i++) {
447 switch (format) {
448 case 0:
449 fprintf(ctx->fp, "%3d ", *(uint8_t *)m);
450 m += 1;
451 break;
452 case 1:
453 fprintf(ctx->fp, "%3d ", *(uint16_t *)m);
454 m += 2;
455 break;
456 case 2:
457 fprintf(ctx->fp, "%3d ", *(uint32_t *)m);
458 m += 4;
459 break;
460 }
461 }
462
463 if (m < ib_end)
464 fprintf(ctx->fp, "...");
465 fprintf(ctx->fp, "\n");
466 }
467
468 static void
469 decode_single_ksp(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
470 {
471 struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
472
473 uint64_t ksp = 0;
474 bool is_simd8 = false; /* vertex shaders on Gen8+ only */
475 bool is_enabled = true;
476
477 struct gen_field_iterator iter;
478 gen_field_iterator_init(&iter, inst, p, 0, false);
479 while (gen_field_iterator_next(&iter)) {
480 if (strcmp(iter.name, "Kernel Start Pointer") == 0) {
481 ksp = iter.raw_value;
482 } else if (strcmp(iter.name, "SIMD8 Dispatch Enable") == 0) {
483 is_simd8 = iter.raw_value;
484 } else if (strcmp(iter.name, "Dispatch Mode") == 0) {
485 is_simd8 = strcmp(iter.value, "SIMD8") == 0;
486 } else if (strcmp(iter.name, "Dispatch Enable") == 0) {
487 is_simd8 = strcmp(iter.value, "SIMD8") == 0;
488 } else if (strcmp(iter.name, "Enable") == 0) {
489 is_enabled = iter.raw_value;
490 }
491 }
492
493 const char *type =
494 strcmp(inst->name, "VS_STATE") == 0 ? "vertex shader" :
495 strcmp(inst->name, "GS_STATE") == 0 ? "geometry shader" :
496 strcmp(inst->name, "SF_STATE") == 0 ? "strips and fans shader" :
497 strcmp(inst->name, "CLIP_STATE") == 0 ? "clip shader" :
498 strcmp(inst->name, "3DSTATE_DS") == 0 ? "tessellation evaluation shader" :
499 strcmp(inst->name, "3DSTATE_HS") == 0 ? "tessellation control shader" :
500 strcmp(inst->name, "3DSTATE_VS") == 0 ? (is_simd8 ? "SIMD8 vertex shader" : "vec4 vertex shader") :
501 strcmp(inst->name, "3DSTATE_GS") == 0 ? (is_simd8 ? "SIMD8 geometry shader" : "vec4 geometry shader") :
502 NULL;
503
504 if (is_enabled) {
505 ctx_disassemble_program(ctx, ksp, type);
506 fprintf(ctx->fp, "\n");
507 }
508 }
509
510 static void
511 decode_ps_kernels(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
512 {
513 struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
514
515 uint64_t ksp[3] = {0, 0, 0};
516 bool enabled[3] = {false, false, false};
517
518 struct gen_field_iterator iter;
519 gen_field_iterator_init(&iter, inst, p, 0, false);
520 while (gen_field_iterator_next(&iter)) {
521 if (strncmp(iter.name, "Kernel Start Pointer ",
522 strlen("Kernel Start Pointer ")) == 0) {
523 int idx = iter.name[strlen("Kernel Start Pointer ")] - '0';
524 ksp[idx] = strtol(iter.value, NULL, 16);
525 } else if (strcmp(iter.name, "8 Pixel Dispatch Enable") == 0) {
526 enabled[0] = strcmp(iter.value, "true") == 0;
527 } else if (strcmp(iter.name, "16 Pixel Dispatch Enable") == 0) {
528 enabled[1] = strcmp(iter.value, "true") == 0;
529 } else if (strcmp(iter.name, "32 Pixel Dispatch Enable") == 0) {
530 enabled[2] = strcmp(iter.value, "true") == 0;
531 }
532 }
533
534 /* Reorder KSPs to be [8, 16, 32] instead of the hardware order. */
535 if (enabled[0] + enabled[1] + enabled[2] == 1) {
536 if (enabled[1]) {
537 ksp[1] = ksp[0];
538 ksp[0] = 0;
539 } else if (enabled[2]) {
540 ksp[2] = ksp[0];
541 ksp[0] = 0;
542 }
543 } else {
544 uint64_t tmp = ksp[1];
545 ksp[1] = ksp[2];
546 ksp[2] = tmp;
547 }
548
549 if (enabled[0])
550 ctx_disassemble_program(ctx, ksp[0], "SIMD8 fragment shader");
551 if (enabled[1])
552 ctx_disassemble_program(ctx, ksp[1], "SIMD16 fragment shader");
553 if (enabled[2])
554 ctx_disassemble_program(ctx, ksp[2], "SIMD32 fragment shader");
555 fprintf(ctx->fp, "\n");
556 }
557
558 static void
559 decode_3dstate_constant(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
560 {
561 struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
562 struct gen_group *body =
563 gen_spec_find_struct(ctx->spec, "3DSTATE_CONSTANT_BODY");
564
565 uint32_t read_length[4] = {0};
566 uint64_t read_addr[4];
567
568 struct gen_field_iterator outer;
569 gen_field_iterator_init(&outer, inst, p, 0, false);
570 while (gen_field_iterator_next(&outer)) {
571 if (outer.struct_desc != body)
572 continue;
573
574 struct gen_field_iterator iter;
575 gen_field_iterator_init(&iter, body, &outer.p[outer.start_bit / 32],
576 0, false);
577
578 while (gen_field_iterator_next(&iter)) {
579 int idx;
580 if (sscanf(iter.name, "Read Length[%d]", &idx) == 1) {
581 read_length[idx] = iter.raw_value;
582 } else if (sscanf(iter.name, "Buffer[%d]", &idx) == 1) {
583 read_addr[idx] = iter.raw_value;
584 }
585 }
586
587 for (int i = 0; i < 4; i++) {
588 if (read_length[i] == 0)
589 continue;
590
591 struct gen_batch_decode_bo buffer = ctx_get_bo(ctx, read_addr[i]);
592 if (!buffer.map) {
593 fprintf(ctx->fp, "constant buffer %d unavailable\n", i);
594 continue;
595 }
596
597 unsigned size = read_length[i] * 32;
598 fprintf(ctx->fp, "constant buffer %d, size %u\n", i, size);
599
600 ctx_print_buffer(ctx, buffer, size, 0, -1);
601 }
602 }
603 }
604
605 static void
606 decode_3dstate_binding_table_pointers(struct gen_batch_decode_ctx *ctx,
607 const uint32_t *p)
608 {
609 dump_binding_table(ctx, p[1], -1);
610 }
611
612 static void
613 decode_3dstate_sampler_state_pointers(struct gen_batch_decode_ctx *ctx,
614 const uint32_t *p)
615 {
616 dump_samplers(ctx, p[1], -1);
617 }
618
619 static void
620 decode_3dstate_sampler_state_pointers_gen6(struct gen_batch_decode_ctx *ctx,
621 const uint32_t *p)
622 {
623 dump_samplers(ctx, p[1], -1);
624 dump_samplers(ctx, p[2], -1);
625 dump_samplers(ctx, p[3], -1);
626 }
627
628 static bool
629 str_ends_with(const char *str, const char *end)
630 {
631 int offset = strlen(str) - strlen(end);
632 if (offset < 0)
633 return false;
634
635 return strcmp(str + offset, end) == 0;
636 }
637
638 static void
639 decode_dynamic_state_pointers(struct gen_batch_decode_ctx *ctx,
640 const char *struct_type, const uint32_t *p,
641 int count)
642 {
643 struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
644
645 uint32_t state_offset = 0;
646
647 struct gen_field_iterator iter;
648 gen_field_iterator_init(&iter, inst, p, 0, false);
649 while (gen_field_iterator_next(&iter)) {
650 if (str_ends_with(iter.name, "Pointer")) {
651 state_offset = iter.raw_value;
652 break;
653 }
654 }
655
656 uint64_t state_addr = ctx->dynamic_base + state_offset;
657 struct gen_batch_decode_bo bo = ctx_get_bo(ctx, state_addr);
658 const void *state_map = bo.map;
659
660 if (state_map == NULL) {
661 fprintf(ctx->fp, " dynamic %s state unavailable\n", struct_type);
662 return;
663 }
664
665 struct gen_group *state = gen_spec_find_struct(ctx->spec, struct_type);
666 if (strcmp(struct_type, "BLEND_STATE") == 0) {
667 /* Blend states are different from the others because they have a header
668 * struct called BLEND_STATE which is followed by a variable number of
669 * BLEND_STATE_ENTRY structs.
670 */
671 fprintf(ctx->fp, "%s\n", struct_type);
672 ctx_print_group(ctx, state, state_addr, state_map);
673
674 state_addr += state->dw_length * 4;
675 state_map += state->dw_length * 4;
676
677 struct_type = "BLEND_STATE_ENTRY";
678 state = gen_spec_find_struct(ctx->spec, struct_type);
679 }
680
681 for (int i = 0; i < count; i++) {
682 fprintf(ctx->fp, "%s %d\n", struct_type, i);
683 ctx_print_group(ctx, state, state_addr, state_map);
684
685 state_addr += state->dw_length * 4;
686 state_map += state->dw_length * 4;
687 }
688 }
689
690 static void
691 decode_3dstate_viewport_state_pointers_cc(struct gen_batch_decode_ctx *ctx,
692 const uint32_t *p)
693 {
694 decode_dynamic_state_pointers(ctx, "CC_VIEWPORT", p, 4);
695 }
696
697 static void
698 decode_3dstate_viewport_state_pointers_sf_clip(struct gen_batch_decode_ctx *ctx,
699 const uint32_t *p)
700 {
701 decode_dynamic_state_pointers(ctx, "SF_CLIP_VIEWPORT", p, 4);
702 }
703
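/* 3DSTATE_BLEND_STATE_POINTERS points at a BLEND_STATE header followed by a
 * variable number of BLEND_STATE_ENTRY structs (see the special case in
 * decode_dynamic_state_pointers() above). A count of 1 prints the header
 * plus a single entry.
 */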
704 static void
705 decode_3dstate_blend_state_pointers(struct gen_batch_decode_ctx *ctx,
706 const uint32_t *p)
707 {
708 decode_dynamic_state_pointers(ctx, "BLEND_STATE", p, 1);
709 }
710
711 static void
712 decode_3dstate_cc_state_pointers(struct gen_batch_decode_ctx *ctx,
713 const uint32_t *p)
714 {
715 decode_dynamic_state_pointers(ctx, "COLOR_CALC_STATE", p, 1);
716 }
717
718 static void
719 decode_3dstate_scissor_state_pointers(struct gen_batch_decode_ctx *ctx,
720 const uint32_t *p)
721 {
722 decode_dynamic_state_pointers(ctx, "SCISSOR_RECT", p, 1);
723 }
724
725 static void
726 decode_load_register_imm(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
727 {
728 struct gen_group *reg = gen_spec_find_register(ctx->spec, p[1]);
729
730 if (reg != NULL) {
731 fprintf(ctx->fp, "register %s (0x%x): 0x%x\n",
732 reg->name, reg->register_offset, p[2]);
733 ctx_print_group(ctx, reg, reg->register_offset, &p[2]);
734 }
735 }
736
737 struct custom_decoder {
738 const char *cmd_name;
739 void (*decode)(struct gen_batch_decode_ctx *ctx, const uint32_t *p);
740 } custom_decoders[] = {
741 { "STATE_BASE_ADDRESS", handle_state_base_address },
742 { "MEDIA_INTERFACE_DESCRIPTOR_LOAD", handle_media_interface_descriptor_load },
743 { "3DSTATE_VERTEX_BUFFERS", handle_3dstate_vertex_buffers },
744 { "3DSTATE_INDEX_BUFFER", handle_3dstate_index_buffer },
745 { "3DSTATE_VS", decode_single_ksp },
746 { "3DSTATE_GS", decode_single_ksp },
747 { "3DSTATE_DS", decode_single_ksp },
748 { "3DSTATE_HS", decode_single_ksp },
749 { "3DSTATE_PS", decode_ps_kernels },
750 { "3DSTATE_CONSTANT_VS", decode_3dstate_constant },
751 { "3DSTATE_CONSTANT_GS", decode_3dstate_constant },
752 { "3DSTATE_CONSTANT_PS", decode_3dstate_constant },
753 { "3DSTATE_CONSTANT_HS", decode_3dstate_constant },
754 { "3DSTATE_CONSTANT_DS", decode_3dstate_constant },
755
756 { "3DSTATE_BINDING_TABLE_POINTERS_VS", decode_3dstate_binding_table_pointers },
757 { "3DSTATE_BINDING_TABLE_POINTERS_HS", decode_3dstate_binding_table_pointers },
758 { "3DSTATE_BINDING_TABLE_POINTERS_DS", decode_3dstate_binding_table_pointers },
759 { "3DSTATE_BINDING_TABLE_POINTERS_GS", decode_3dstate_binding_table_pointers },
760 { "3DSTATE_BINDING_TABLE_POINTERS_PS", decode_3dstate_binding_table_pointers },
761
762 { "3DSTATE_SAMPLER_STATE_POINTERS_VS", decode_3dstate_sampler_state_pointers },
763 { "3DSTATE_SAMPLER_STATE_POINTERS_HS", decode_3dstate_sampler_state_pointers },
764 { "3DSTATE_SAMPLER_STATE_POINTERS_DS", decode_3dstate_sampler_state_pointers },
765 { "3DSTATE_SAMPLER_STATE_POINTERS_GS", decode_3dstate_sampler_state_pointers },
766 { "3DSTATE_SAMPLER_STATE_POINTERS_PS", decode_3dstate_sampler_state_pointers },
767 { "3DSTATE_SAMPLER_STATE_POINTERS", decode_3dstate_sampler_state_pointers_gen6 },
768
769 { "3DSTATE_VIEWPORT_STATE_POINTERS_CC", decode_3dstate_viewport_state_pointers_cc },
770 { "3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP", decode_3dstate_viewport_state_pointers_sf_clip },
771 { "3DSTATE_BLEND_STATE_POINTERS", decode_3dstate_blend_state_pointers },
772 { "3DSTATE_CC_STATE_POINTERS", decode_3dstate_cc_state_pointers },
773 { "3DSTATE_SCISSOR_STATE_POINTERS", decode_3dstate_scissor_state_pointers },
774 { "MI_LOAD_REGISTER_IMM", decode_load_register_imm }
775 };
776
777 static inline uint64_t
778 get_address(struct gen_spec *spec, const uint32_t *p)
779 {
780 /* Addresses are always guaranteed to be page-aligned and sometimes
781 * hardware packets have extra stuff stuffed in the bottom 12 bits.
782 */
783 uint64_t addr = p[0] & ~0xfffu;
784
785 if (gen_spec_get_gen(spec) >= gen_make_gen(8,0)) {
786 /* On Broadwell and above, we have 48-bit addresses which consume two
787 * dwords. Some packets require that these get stored in a "canonical
788 * form" which means that bit 47 is sign-extended through the upper
789 * bits. In order to correctly handle those aub dumps, we need to mask
790 * off the top 16 bits.
791 */
792 addr |= ((uint64_t)p[1] & 0xffff) << 32;
793 }
794
795 return addr;
796 }
797
798 void
799 gen_print_batch(struct gen_batch_decode_ctx *ctx,
800 const uint32_t *batch, uint32_t batch_size,
801 uint64_t batch_addr)
802 {
803 const uint32_t *p, *end = batch + batch_size;
804 int length;
805 struct gen_group *inst;
806
807 for (p = batch; p < end; p += length) {
808 inst = gen_spec_find_instruction(ctx->spec, p);
809 length = gen_group_get_length(inst, p);
810 assert(inst == NULL || length > 0);
811 length = MAX2(1, length);
812
813 const char *reset_color = ctx->flags & GEN_BATCH_DECODE_IN_COLOR ? NORMAL : "";
814
815 uint64_t offset;
816 if (ctx->flags & GEN_BATCH_DECODE_OFFSETS)
817 offset = batch_addr + ((char *)p - (char *)batch);
818 else
819 offset = 0;
820
821 if (inst == NULL) {
822 fprintf(ctx->fp, "%s0x%08"PRIx64": unknown instruction %08x%s\n",
823 (ctx->flags & GEN_BATCH_DECODE_IN_COLOR) ? RED_COLOR : "",
824 offset, p[0], reset_color);
825 continue;
826 }
827
828 const char *color;
829 const char *inst_name = gen_group_get_name(inst);
830 if (ctx->flags & GEN_BATCH_DECODE_IN_COLOR) {
831 reset_color = NORMAL;
832 if (ctx->flags & GEN_BATCH_DECODE_FULL) {
833 if (strcmp(inst_name, "MI_BATCH_BUFFER_START") == 0 ||
834 strcmp(inst_name, "MI_BATCH_BUFFER_END") == 0)
835 color = GREEN_HEADER;
836 else
837 color = BLUE_HEADER;
838 } else {
839 color = NORMAL;
840 }
841 } else {
842 color = "";
843 reset_color = "";
844 }
845
846 fprintf(ctx->fp, "%s0x%08"PRIx64": 0x%08x: %-80s%s\n",
847 color, offset, p[0], inst_name, reset_color);
848
849 if (ctx->flags & GEN_BATCH_DECODE_FULL) {
850 ctx_print_group(ctx, inst, offset, p);
851
852 for (int i = 0; i < ARRAY_SIZE(custom_decoders); i++) {
853 if (strcmp(inst_name, custom_decoders[i].cmd_name) == 0) {
854 custom_decoders[i].decode(ctx, p);
855 break;
856 }
857 }
858 }
859
860 if (strcmp(inst_name, "MI_BATCH_BUFFER_START") == 0) {
861 struct gen_batch_decode_bo next_batch = {};
862 bool second_level = false;
863 struct gen_field_iterator iter;
864 gen_field_iterator_init(&iter, inst, p, 0, false);
865 while (gen_field_iterator_next(&iter)) {
866 if (strcmp(iter.name, "Batch Buffer Start Address") == 0) {
867 next_batch = ctx_get_bo(ctx, iter.raw_value);
868 } else if (strcmp(iter.name, "Second Level Batch Buffer") == 0) {
869 second_level = iter.raw_value;
870 }
871 }
872
873 if (next_batch.map == NULL) {
874 fprintf(ctx->fp, "Secondary batch at 0x%08"PRIx64" unavailable\n",
875 next_batch.addr);
876 } else {
877 gen_print_batch(ctx, next_batch.map, next_batch.size,
878 next_batch.addr);
879 }
880 if (second_level) {
881 /* MI_BATCH_BUFFER_START with "2nd Level Batch Buffer" set acts
882 * like a subroutine call. Commands that come afterwards get
883 * processed once the 2nd level batch buffer returns with
884 * MI_BATCH_BUFFER_END.
885 */
886 continue;
887 } else {
888 /* MI_BATCH_BUFFER_START with "2nd Level Batch Buffer" unset acts
889 * like a goto. Nothing after it will ever get processed, so stop
890 * decoding this batch here; the jump target was already printed by
891 * the recursive gen_print_batch() call above.
892 */
893 break;
894 }
895 } else if (strcmp(inst_name, "MI_BATCH_BUFFER_END") == 0) {
896 break;
897 }
898 }
899 }