intel: decoder: remove unused variable
[mesa.git] / src / intel / common / gen_batch_decoder.c
1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "common/gen_decoder.h"
25 #include "gen_disasm.h"
26
27 #include <string.h>
28
29 void
30 gen_batch_decode_ctx_init(struct gen_batch_decode_ctx *ctx,
31 const struct gen_device_info *devinfo,
32 FILE *fp, enum gen_batch_decode_flags flags,
33 const char *xml_path,
34 struct gen_batch_decode_bo (*get_bo)(void *,
35 uint64_t),
36 unsigned (*get_state_size)(void *, uint32_t),
37 void *user_data)
38 {
39 memset(ctx, 0, sizeof(*ctx));
40
41 ctx->get_bo = get_bo;
42 ctx->get_state_size = get_state_size;
43 ctx->user_data = user_data;
44 ctx->fp = fp;
45 ctx->flags = flags;
46 ctx->max_vbo_decoded_lines = -1; /* No limit! */
47
48 if (xml_path == NULL)
49 ctx->spec = gen_spec_load(devinfo);
50 else
51 ctx->spec = gen_spec_load_from_path(devinfo, xml_path);
52 ctx->disasm = gen_disasm_create(devinfo);
53 }
54
55 void
56 gen_batch_decode_ctx_finish(struct gen_batch_decode_ctx *ctx)
57 {
58 gen_spec_destroy(ctx->spec);
59 gen_disasm_destroy(ctx->disasm);
60 }
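/* Typical lifecycle, sketched for illustration only.  The names devinfo,
 * get_bo_cb, driver_data, batch_map, batch_len_dw and batch_gpu_addr are
 * hypothetical caller-side glue, not part of this file.  Passing NULL for
 * xml_path selects the built-in genxml data, and the state-size callback
 * is optional:
 *
 *    struct gen_batch_decode_ctx ctx;
 *
 *    gen_batch_decode_ctx_init(&ctx, devinfo, stderr,
 *                              GEN_BATCH_DECODE_FULL |
 *                              GEN_BATCH_DECODE_OFFSETS,
 *                              NULL, get_bo_cb, NULL, driver_data);
 *    gen_print_batch(&ctx, batch_map, batch_len_dw, batch_gpu_addr);
 *    gen_batch_decode_ctx_finish(&ctx);
 *
 * batch_len_dw is a dword count; see the pointer arithmetic at the top of
 * gen_print_batch().
 */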
61
62 #define CSI "\e["
63 #define RED_COLOR CSI "31m"
64 #define BLUE_HEADER CSI "0;44m"
65 #define GREEN_HEADER CSI "1;42m"
66 #define NORMAL CSI "0m"
67
68 #define ARRAY_LENGTH(a) (sizeof (a) / sizeof (a)[0])
69
70 static void
71 ctx_print_group(struct gen_batch_decode_ctx *ctx,
72 struct gen_group *group,
73 uint64_t address, const void *map)
74 {
75 gen_print_group(ctx->fp, group, address, map, 0,
76 (ctx->flags & GEN_BATCH_DECODE_IN_COLOR) != 0);
77 }
78
79 static struct gen_batch_decode_bo
80 ctx_get_bo(struct gen_batch_decode_ctx *ctx, uint64_t addr)
81 {
82 if (gen_spec_get_gen(ctx->spec) >= gen_make_gen(8,0)) {
83 /* On Broadwell and above, we have 48-bit addresses which consume two
84 * dwords. Some packets require that these get stored in a "canonical
85 * form" which means that bit 47 is sign-extended through the upper
86 * bits. In order to correctly handle those aub dumps, we need to mask
87 * off the top 16 bits.
88 */
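/* For example, a canonical address such as 0xffff800012345000 (bit 47 set
 * and sign-extended upward) becomes 0x0000800012345000 after the mask
 * below.
 */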
89 addr &= (~0ull >> 16);
90 }
91
92 struct gen_batch_decode_bo bo = ctx->get_bo(ctx->user_data, addr);
93
94 if (gen_spec_get_gen(ctx->spec) >= gen_make_gen(8,0))
95 bo.addr &= (~0ull >> 16);
96
97 /* We may actually have an offset into the bo */
98 if (bo.map != NULL) {
99 assert(bo.addr <= addr);
100 uint64_t offset = addr - bo.addr;
101 bo.map += offset;
102 bo.addr += offset;
103 bo.size -= offset;
104 }
105
106 return bo;
107 }
108
109 static int
110 update_count(struct gen_batch_decode_ctx *ctx,
111 uint32_t offset_from_dsba,
112 unsigned element_dwords,
113 unsigned guess)
114 {
115 unsigned size = 0;
116
117 if (ctx->get_state_size)
118 size = ctx->get_state_size(ctx->user_data, offset_from_dsba);
119
120 if (size > 0)
121 return size / (sizeof(uint32_t) * element_dwords);
122
123 /* In the absence of any information, just guess arbitrarily. */
124 return guess;
125 }
126
127 static void
128 ctx_disassemble_program(struct gen_batch_decode_ctx *ctx,
129 uint32_t ksp, const char *type)
130 {
131 uint64_t addr = ctx->instruction_base + ksp;
132 struct gen_batch_decode_bo bo = ctx_get_bo(ctx, addr);
133 if (!bo.map)
134 return;
135
136 fprintf(ctx->fp, "\nReferenced %s:\n", type);
137 gen_disasm_disassemble(ctx->disasm, bo.map, 0, ctx->fp);
138 }
139
140 /* Heuristic to determine whether a uint32_t is probably actually a float
141 * (http://stackoverflow.com/a/2953466)
142 */
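/* Examples: 0x3f800000 (1.0f) and 0x42280000 (42.0f) pass the exponent
 * test below; 0x00000100 (the integer 256) is a subnormal with low
 * mantissa bits set, fails all three tests, and so gets printed as hex.
 */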
143
144 static bool
145 probably_float(uint32_t bits)
146 {
147 int exp = ((bits & 0x7f800000U) >> 23) - 127;
148 uint32_t mant = bits & 0x007fffff;
149
150 /* +- 0.0 */
151 if (exp == -127 && mant == 0)
152 return true;
153
154 /* +- 1 billionth to 1 billion */
155 if (-30 <= exp && exp <= 30)
156 return true;
157
158 /* some value with only a few binary digits */
159 if ((mant & 0x0000ffff) == 0)
160 return true;
161
162 return false;
163 }
164
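/* Hex/float dump of a buffer.  Rows are 'pitch' bytes wide (pitch / 4
 * dwords), capped at 8 dwords per row; a pitch of 0 also yields 8-dword
 * rows.  At most max_lines rows are printed; a negative max_lines means
 * no limit.
 */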
165 static void
166 ctx_print_buffer(struct gen_batch_decode_ctx *ctx,
167 struct gen_batch_decode_bo bo,
168 uint32_t read_length,
169 uint32_t pitch,
170 int max_lines)
171 {
172 const uint32_t *dw_end = bo.map + MIN2(bo.size, read_length);
173
174 int column_count = 0, line_count = -1;
175 for (const uint32_t *dw = bo.map; dw < dw_end; dw++) {
176 if (column_count * 4 == pitch || column_count == 8) {
177 fprintf(ctx->fp, "\n");
178 column_count = 0;
179 line_count++;
180
181 if (max_lines >= 0 && line_count >= max_lines)
182 break;
183 }
184 fprintf(ctx->fp, column_count == 0 ? "  " : " ");
185
186 if ((ctx->flags & GEN_BATCH_DECODE_FLOATS) && probably_float(*dw))
187 fprintf(ctx->fp, " %8.2f", *(float *) dw);
188 else
189 fprintf(ctx->fp, " 0x%08x", *dw);
190
191 column_count++;
192 }
193 fprintf(ctx->fp, "\n");
194 }
195
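/* STATE_BASE_ADDRESS establishes the bases that later commands use for
 * relative pointers: binding table and surface state offsets are resolved
 * against surface_base, sampler/viewport/blend/CC/scissor state against
 * dynamic_base, and kernel start pointers against instruction_base.
 */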
196 static void
197 handle_state_base_address(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
198 {
199 struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
200
201 struct gen_field_iterator iter;
202 gen_field_iterator_init(&iter, inst, p, 0, false);
203
204 while (gen_field_iterator_next(&iter)) {
205 if (strcmp(iter.name, "Surface State Base Address") == 0) {
206 ctx->surface_base = iter.raw_value;
207 } else if (strcmp(iter.name, "Dynamic State Base Address") == 0) {
208 ctx->dynamic_base = iter.raw_value;
209 } else if (strcmp(iter.name, "Instruction Base Address") == 0) {
210 ctx->instruction_base = iter.raw_value;
211 }
212 }
213 }
214
215 static void
216 dump_binding_table(struct gen_batch_decode_ctx *ctx, uint32_t offset, int count)
217 {
218 struct gen_group *strct =
219 gen_spec_find_struct(ctx->spec, "RENDER_SURFACE_STATE");
220 if (strct == NULL) {
221 fprintf(ctx->fp, "did not find RENDER_SURFACE_STATE info\n");
222 return;
223 }
224
225 if (count < 0)
226 count = update_count(ctx, offset, 1, 8);
227
228 if (offset % 32 != 0 || offset >= UINT16_MAX) {
229 fprintf(ctx->fp, " invalid binding table pointer\n");
230 return;
231 }
232
233 struct gen_batch_decode_bo bind_bo =
234 ctx_get_bo(ctx, ctx->surface_base + offset);
235
236 if (bind_bo.map == NULL) {
237 fprintf(ctx->fp, " binding table unavailable\n");
238 return;
239 }
240
241 const uint32_t *pointers = bind_bo.map;
242 for (int i = 0; i < count; i++) {
243 if (pointers[i] == 0)
244 continue;
245
246 uint64_t addr = ctx->surface_base + pointers[i];
247 struct gen_batch_decode_bo bo = ctx_get_bo(ctx, addr);
248 uint32_t size = strct->dw_length * 4;
249
250 if (pointers[i] % 32 != 0 ||
251 addr < bo.addr || addr + size >= bo.addr + bo.size) {
252 fprintf(ctx->fp, "pointer %u: %08x <not valid>\n", i, pointers[i]);
253 continue;
254 }
255
256 fprintf(ctx->fp, "pointer %u: %08x\n", i, pointers[i]);
257 ctx_print_group(ctx, strct, addr, bo.map + (addr - bo.addr));
258 }
259 }
260
261 static void
262 dump_samplers(struct gen_batch_decode_ctx *ctx, uint32_t offset, int count)
263 {
264 struct gen_group *strct = gen_spec_find_struct(ctx->spec, "SAMPLER_STATE");
265
266 if (count < 0)
267 count = update_count(ctx, offset, strct->dw_length, 4);
268
269 uint64_t state_addr = ctx->dynamic_base + offset;
270 struct gen_batch_decode_bo bo = ctx_get_bo(ctx, state_addr);
271 const void *state_map = bo.map;
272
273 if (state_map == NULL) {
274 fprintf(ctx->fp, " samplers unavailable\n");
275 return;
276 }
277
278 if (offset % 32 != 0 || state_addr - bo.addr >= bo.size) {
279 fprintf(ctx->fp, " invalid sampler state pointer\n");
280 return;
281 }
282
283 for (int i = 0; i < count; i++) {
284 fprintf(ctx->fp, "sampler state %d\n", i);
285 ctx_print_group(ctx, strct, state_addr, state_map);
286 state_addr += 16;
287 state_map += 16;
288 }
289 }
290
291 static void
292 handle_media_interface_descriptor_load(struct gen_batch_decode_ctx *ctx,
293 const uint32_t *p)
294 {
295 struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
296 struct gen_group *desc =
297 gen_spec_find_struct(ctx->spec, "INTERFACE_DESCRIPTOR_DATA");
298
299 struct gen_field_iterator iter;
300 gen_field_iterator_init(&iter, inst, p, 0, false);
301 uint32_t descriptor_offset = 0;
302 int descriptor_count = 0;
303 while (gen_field_iterator_next(&iter)) {
304 if (strcmp(iter.name, "Interface Descriptor Data Start Address") == 0) {
305 descriptor_offset = strtol(iter.value, NULL, 16);
306 } else if (strcmp(iter.name, "Interface Descriptor Total Length") == 0) {
307 descriptor_count =
308 strtol(iter.value, NULL, 16) / (desc->dw_length * 4);
309 }
310 }
311
312 uint64_t desc_addr = ctx->dynamic_base + descriptor_offset;
313 struct gen_batch_decode_bo bo = ctx_get_bo(ctx, desc_addr);
314 const void *desc_map = bo.map;
315
316 if (desc_map == NULL) {
317 fprintf(ctx->fp, " interface descriptors unavailable\n");
318 return;
319 }
320
321 for (int i = 0; i < descriptor_count; i++) {
322 fprintf(ctx->fp, "descriptor %d: %08x\n", i,
323 descriptor_offset + i * desc->dw_length * 4);
323
324 ctx_print_group(ctx, desc, desc_addr, desc_map);
325
326 gen_field_iterator_init(&iter, desc, desc_map, 0, false);
327 uint64_t ksp = 0;
328 uint32_t sampler_offset = 0, sampler_count = 0;
329 uint32_t binding_table_offset = 0, binding_entry_count = 0;
330 while (gen_field_iterator_next(&iter)) {
331 if (strcmp(iter.name, "Kernel Start Pointer") == 0) {
332 ksp = strtoll(iter.value, NULL, 16);
333 } else if (strcmp(iter.name, "Sampler State Pointer") == 0) {
334 sampler_offset = strtol(iter.value, NULL, 16);
335 } else if (strcmp(iter.name, "Sampler Count") == 0) {
336 sampler_count = strtol(iter.value, NULL, 10);
337 } else if (strcmp(iter.name, "Binding Table Pointer") == 0) {
338 binding_table_offset = strtol(iter.value, NULL, 16);
339 } else if (strcmp(iter.name, "Binding Table Entry Count") == 0) {
340 binding_entry_count = strtol(iter.value, NULL, 10);
341 }
342 }
343
344 ctx_disassemble_program(ctx, ksp, "compute shader");
345 fprintf(ctx->fp, "\n");
346
347 dump_samplers(ctx, sampler_offset, sampler_count);
348 dump_binding_table(ctx, binding_table_offset, binding_entry_count);
349
350 desc_map += desc->dw_length * 4;
351 desc_addr += desc->dw_length * 4;
352 }
353 }
354
355 static void
356 handle_3dstate_vertex_buffers(struct gen_batch_decode_ctx *ctx,
357 const uint32_t *p)
358 {
359 struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
360 struct gen_group *vbs = gen_spec_find_struct(ctx->spec, "VERTEX_BUFFER_STATE");
361
362 struct gen_batch_decode_bo vb = {};
363 uint32_t vb_size = 0;
364 int index = -1;
365 int pitch = -1;
366 bool ready = false;
367
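/* Each VERTEX_BUFFER_STATE element is decoded with a nested iterator.
 * The last field of interest is "Buffer Size" (newer gens) or
 * "End Address" (older gens); once one of them has been seen the element
 * is complete and the buffer contents are dumped.
 */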
368 struct gen_field_iterator iter;
369 gen_field_iterator_init(&iter, inst, p, 0, false);
370 while (gen_field_iterator_next(&iter)) {
371 if (iter.struct_desc != vbs)
372 continue;
373
374 struct gen_field_iterator vbs_iter;
375 gen_field_iterator_init(&vbs_iter, vbs, &iter.p[iter.start_bit / 32], 0, false);
376 while (gen_field_iterator_next(&vbs_iter)) {
377 if (strcmp(vbs_iter.name, "Vertex Buffer Index") == 0) {
378 index = vbs_iter.raw_value;
379 } else if (strcmp(vbs_iter.name, "Buffer Pitch") == 0) {
380 pitch = vbs_iter.raw_value;
381 } else if (strcmp(vbs_iter.name, "Buffer Starting Address") == 0) {
382 vb = ctx_get_bo(ctx, vbs_iter.raw_value);
383 } else if (strcmp(vbs_iter.name, "Buffer Size") == 0) {
384 vb_size = vbs_iter.raw_value;
385 ready = true;
386 } else if (strcmp(vbs_iter.name, "End Address") == 0) {
387 if (vb.map && vbs_iter.raw_value >= vb.addr)
388 vb_size = vbs_iter.raw_value - vb.addr;
389 else
390 vb_size = 0;
391 ready = true;
392 }
393
394 if (!ready)
395 continue;
396
397 fprintf(ctx->fp, "vertex buffer %d, size %d\n", index, vb_size);
398
399 if (vb.map == NULL) {
400 fprintf(ctx->fp, " buffer contents unavailable\n");
401 } else if (vb_size > 0) {
402 ctx_print_buffer(ctx, vb, vb_size, pitch, ctx->max_vbo_decoded_lines);
403 }
408
409 vb.map = NULL;
410 vb_size = 0;
411 index = -1;
412 pitch = -1;
413 ready = false;
414 }
415 }
416 }
417
418 static void
419 handle_3dstate_index_buffer(struct gen_batch_decode_ctx *ctx,
420 const uint32_t *p)
421 {
422 struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
423
424 struct gen_batch_decode_bo ib = {};
425 uint32_t ib_size = 0;
426 uint32_t format = 0;
427
428 struct gen_field_iterator iter;
429 gen_field_iterator_init(&iter, inst, p, 0, false);
430 while (gen_field_iterator_next(&iter)) {
431 if (strcmp(iter.name, "Index Format") == 0) {
432 format = iter.raw_value;
433 } else if (strcmp(iter.name, "Buffer Starting Address") == 0) {
434 ib = ctx_get_bo(ctx, iter.raw_value);
435 } else if (strcmp(iter.name, "Buffer Size") == 0) {
436 ib_size = iter.raw_value;
437 }
438 }
439
440 if (ib.map == NULL) {
441 fprintf(ctx->fp, " buffer contents unavailable\n");
442 return;
443 }
444
445 const void *m = ib.map;
446 const void *ib_end = ib.map + MIN2(ib.size, ib_size);
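/* Dump at most the first 10 indices; format 0/1/2 selects byte, word or
 * dword indices respectively.
 */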
447 for (int i = 0; m < ib_end && i < 10; i++) {
448 switch (format) {
449 case 0:
450 fprintf(ctx->fp, "%3d ", *(uint8_t *)m);
451 m += 1;
452 break;
453 case 1:
454 fprintf(ctx->fp, "%3d ", *(uint16_t *)m);
455 m += 2;
456 break;
457 case 2:
458 fprintf(ctx->fp, "%3d ", *(uint32_t *)m);
459 m += 4;
460 break;
461 }
462 }
463
464 if (m < ib_end)
465 fprintf(ctx->fp, "...");
466 fprintf(ctx->fp, "\n");
467 }
468
469 static void
470 decode_single_ksp(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
471 {
472 struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
473
474 uint64_t ksp = 0;
475 bool is_simd8 = false; /* vertex shaders on Gen8+ only */
476 bool is_enabled = true;
477
478 struct gen_field_iterator iter;
479 gen_field_iterator_init(&iter, inst, p, 0, false);
480 while (gen_field_iterator_next(&iter)) {
481 if (strcmp(iter.name, "Kernel Start Pointer") == 0) {
482 ksp = iter.raw_value;
483 } else if (strcmp(iter.name, "SIMD8 Dispatch Enable") == 0) {
484 is_simd8 = iter.raw_value;
485 } else if (strcmp(iter.name, "Dispatch Mode") == 0) {
486 is_simd8 = strcmp(iter.value, "SIMD8") == 0;
487 } else if (strcmp(iter.name, "Dispatch Enable") == 0) {
488 is_simd8 = strcmp(iter.value, "SIMD8") == 0;
489 } else if (strcmp(iter.name, "Enable") == 0) {
490 is_enabled = iter.raw_value;
491 }
492 }
493
494 const char *type =
495 strcmp(inst->name, "VS_STATE") == 0 ? "vertex shader" :
496 strcmp(inst->name, "GS_STATE") == 0 ? "geometry shader" :
497 strcmp(inst->name, "SF_STATE") == 0 ? "strips and fans shader" :
498 strcmp(inst->name, "CLIP_STATE") == 0 ? "clip shader" :
499 strcmp(inst->name, "3DSTATE_DS") == 0 ? "tessellation evaluation shader" :
500 strcmp(inst->name, "3DSTATE_HS") == 0 ? "tessellation control shader" :
501 strcmp(inst->name, "3DSTATE_VS") == 0 ? (is_simd8 ? "SIMD8 vertex shader" : "vec4 vertex shader") :
502 strcmp(inst->name, "3DSTATE_GS") == 0 ? (is_simd8 ? "SIMD8 geometry shader" : "vec4 geometry shader") :
503 NULL;
504
505 if (is_enabled) {
506 ctx_disassemble_program(ctx, ksp, type);
507 fprintf(ctx->fp, "\n");
508 }
509 }
510
511 static void
512 decode_ps_kernels(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
513 {
514 struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
515
516 uint64_t ksp[3] = {0, 0, 0};
517 bool enabled[3] = {false, false, false};
518
519 struct gen_field_iterator iter;
520 gen_field_iterator_init(&iter, inst, p, 0, false);
521 while (gen_field_iterator_next(&iter)) {
522 if (strncmp(iter.name, "Kernel Start Pointer ",
523 strlen("Kernel Start Pointer ")) == 0) {
524 int idx = iter.name[strlen("Kernel Start Pointer ")] - '0';
525 ksp[idx] = strtol(iter.value, NULL, 16);
526 } else if (strcmp(iter.name, "8 Pixel Dispatch Enable") == 0) {
527 enabled[0] = strcmp(iter.value, "true") == 0;
528 } else if (strcmp(iter.name, "16 Pixel Dispatch Enable") == 0) {
529 enabled[1] = strcmp(iter.value, "true") == 0;
530 } else if (strcmp(iter.name, "32 Pixel Dispatch Enable") == 0) {
531 enabled[2] = strcmp(iter.value, "true") == 0;
532 }
533 }
534
535 /* Reorder KSPs to be [8, 16, 32] instead of the hardware order. */
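/* Judging from the swap below: when more than one dispatch mode is
 * enabled, the hardware's KSP[1] holds the SIMD32 kernel and KSP[2] the
 * SIMD16 kernel; when exactly one mode is enabled its kernel lives in
 * KSP[0].
 */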
536 if (enabled[0] + enabled[1] + enabled[2] == 1) {
537 if (enabled[1]) {
538 ksp[1] = ksp[0];
539 ksp[0] = 0;
540 } else if (enabled[2]) {
541 ksp[2] = ksp[0];
542 ksp[0] = 0;
543 }
544 } else {
545 uint64_t tmp = ksp[1];
546 ksp[1] = ksp[2];
547 ksp[2] = tmp;
548 }
549
550 if (enabled[0])
551 ctx_disassemble_program(ctx, ksp[0], "SIMD8 fragment shader");
552 if (enabled[1])
553 ctx_disassemble_program(ctx, ksp[1], "SIMD16 fragment shader");
554 if (enabled[2])
555 ctx_disassemble_program(ctx, ksp[2], "SIMD32 fragment shader");
556 fprintf(ctx->fp, "\n");
557 }
558
559 static void
560 decode_3dstate_constant(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
561 {
562 struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
563 struct gen_group *body =
564 gen_spec_find_struct(ctx->spec, "3DSTATE_CONSTANT_BODY");
565
566 uint32_t read_length[4] = {0};
567 uint64_t read_addr[4];
568
569 struct gen_field_iterator outer;
570 gen_field_iterator_init(&outer, inst, p, 0, false);
571 while (gen_field_iterator_next(&outer)) {
572 if (outer.struct_desc != body)
573 continue;
574
575 struct gen_field_iterator iter;
576 gen_field_iterator_init(&iter, body, &outer.p[outer.start_bit / 32],
577 0, false);
578
579 while (gen_field_iterator_next(&iter)) {
580 int idx;
581 if (sscanf(iter.name, "Read Length[%d]", &idx) == 1) {
582 read_length[idx] = iter.raw_value;
583 } else if (sscanf(iter.name, "Buffer[%d]", &idx) == 1) {
584 read_addr[idx] = iter.raw_value;
585 }
586 }
587
588 for (int i = 0; i < 4; i++) {
589 if (read_length[i] == 0)
590 continue;
591
592 struct gen_batch_decode_bo buffer = ctx_get_bo(ctx, read_addr[i]);
593 if (!buffer.map) {
594 fprintf(ctx->fp, "constant buffer %d unavailable\n", i);
595 continue;
596 }
597
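/* Read lengths are in 256-bit (32-byte) units. */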
598 unsigned size = read_length[i] * 32;
599 fprintf(ctx->fp, "constant buffer %d, size %u\n", i, size);
600
601 ctx_print_buffer(ctx, buffer, size, 0, -1);
602 }
603 }
604 }
605
606 static void
607 decode_3dstate_binding_table_pointers(struct gen_batch_decode_ctx *ctx,
608 const uint32_t *p)
609 {
610 dump_binding_table(ctx, p[1], -1);
611 }
612
613 static void
614 decode_3dstate_sampler_state_pointers(struct gen_batch_decode_ctx *ctx,
615 const uint32_t *p)
616 {
617 dump_samplers(ctx, p[1], -1);
618 }
619
620 static void
621 decode_3dstate_sampler_state_pointers_gen6(struct gen_batch_decode_ctx *ctx,
622 const uint32_t *p)
623 {
624 dump_samplers(ctx, p[1], -1);
625 dump_samplers(ctx, p[2], -1);
626 dump_samplers(ctx, p[3], -1);
627 }
628
629 static bool
630 str_ends_with(const char *str, const char *end)
631 {
632 int offset = strlen(str) - strlen(end);
633 if (offset < 0)
634 return false;
635
636 return strcmp(str + offset, end) == 0;
637 }
638
639 static void
640 decode_dynamic_state_pointers(struct gen_batch_decode_ctx *ctx,
641 const char *struct_type, const uint32_t *p,
642 int count)
643 {
644 struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
645 struct gen_group *state = gen_spec_find_struct(ctx->spec, struct_type);
646
647 uint32_t state_offset = 0;
648
649 struct gen_field_iterator iter;
650 gen_field_iterator_init(&iter, inst, p, 0, false);
651 while (gen_field_iterator_next(&iter)) {
652 if (str_ends_with(iter.name, "Pointer")) {
653 state_offset = iter.raw_value;
654 break;
655 }
656 }
657
658 uint64_t state_addr = ctx->dynamic_base + state_offset;
659 struct gen_batch_decode_bo bo = ctx_get_bo(ctx, state_addr);
660 const void *state_map = bo.map;
661
662 if (state_map == NULL) {
663 fprintf(ctx->fp, " dynamic %s state unavailable\n", struct_type);
664 return;
665 }
666
667 for (int i = 0; i < count; i++) {
668 fprintf(ctx->fp, "%s %d\n", struct_type, i);
669 ctx_print_group(ctx, state, state_addr, state_map);
670
671 state_addr += state->dw_length * 4;
672 state_map += state->dw_length * 4;
673 }
674 }
675
676 static void
677 decode_3dstate_viewport_state_pointers_cc(struct gen_batch_decode_ctx *ctx,
678 const uint32_t *p)
679 {
680 decode_dynamic_state_pointers(ctx, "CC_VIEWPORT", p, 4);
681 }
682
683 static void
684 decode_3dstate_viewport_state_pointers_sf_clip(struct gen_batch_decode_ctx *ctx,
685 const uint32_t *p)
686 {
687 decode_dynamic_state_pointers(ctx, "SF_CLIP_VIEWPORT", p, 4);
688 }
689
690 static void
691 decode_3dstate_blend_state_pointers(struct gen_batch_decode_ctx *ctx,
692 const uint32_t *p)
693 {
694 decode_dynamic_state_pointers(ctx, "BLEND_STATE", p, 1);
695 }
696
697 static void
698 decode_3dstate_cc_state_pointers(struct gen_batch_decode_ctx *ctx,
699 const uint32_t *p)
700 {
701 decode_dynamic_state_pointers(ctx, "COLOR_CALC_STATE", p, 1);
702 }
703
704 static void
705 decode_3dstate_scissor_state_pointers(struct gen_batch_decode_ctx *ctx,
706 const uint32_t *p)
707 {
708 decode_dynamic_state_pointers(ctx, "SCISSOR_RECT", p, 1);
709 }
710
711 static void
712 decode_load_register_imm(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
713 {
714 struct gen_group *reg = gen_spec_find_register(ctx->spec, p[1]);
715
716 if (reg != NULL) {
717 fprintf(ctx->fp, "register %s (0x%x): 0x%x\n",
718 reg->name, reg->register_offset, p[2]);
719 ctx_print_group(ctx, reg, reg->register_offset, &p[2]);
720 }
721 }
722
723 struct custom_decoder {
724 const char *cmd_name;
725 void (*decode)(struct gen_batch_decode_ctx *ctx, const uint32_t *p);
726 } custom_decoders[] = {
727 { "STATE_BASE_ADDRESS", handle_state_base_address },
728 { "MEDIA_INTERFACE_DESCRIPTOR_LOAD", handle_media_interface_descriptor_load },
729 { "3DSTATE_VERTEX_BUFFERS", handle_3dstate_vertex_buffers },
730 { "3DSTATE_INDEX_BUFFER", handle_3dstate_index_buffer },
731 { "3DSTATE_VS", decode_single_ksp },
732 { "3DSTATE_GS", decode_single_ksp },
733 { "3DSTATE_DS", decode_single_ksp },
734 { "3DSTATE_HS", decode_single_ksp },
735 { "3DSTATE_PS", decode_ps_kernels },
736 { "3DSTATE_CONSTANT_VS", decode_3dstate_constant },
737 { "3DSTATE_CONSTANT_GS", decode_3dstate_constant },
738 { "3DSTATE_CONSTANT_PS", decode_3dstate_constant },
739 { "3DSTATE_CONSTANT_HS", decode_3dstate_constant },
740 { "3DSTATE_CONSTANT_DS", decode_3dstate_constant },
741
742 { "3DSTATE_BINDING_TABLE_POINTERS_VS", decode_3dstate_binding_table_pointers },
743 { "3DSTATE_BINDING_TABLE_POINTERS_HS", decode_3dstate_binding_table_pointers },
744 { "3DSTATE_BINDING_TABLE_POINTERS_DS", decode_3dstate_binding_table_pointers },
745 { "3DSTATE_BINDING_TABLE_POINTERS_GS", decode_3dstate_binding_table_pointers },
746 { "3DSTATE_BINDING_TABLE_POINTERS_PS", decode_3dstate_binding_table_pointers },
747
748 { "3DSTATE_SAMPLER_STATE_POINTERS_VS", decode_3dstate_sampler_state_pointers },
749 { "3DSTATE_SAMPLER_STATE_POINTERS_HS", decode_3dstate_sampler_state_pointers },
750 { "3DSTATE_SAMPLER_STATE_POINTERS_DS", decode_3dstate_sampler_state_pointers },
751 { "3DSTATE_SAMPLER_STATE_POINTERS_GS", decode_3dstate_sampler_state_pointers },
752 { "3DSTATE_SAMPLER_STATE_POINTERS_PS", decode_3dstate_sampler_state_pointers },
753 { "3DSTATE_SAMPLER_STATE_POINTERS", decode_3dstate_sampler_state_pointers_gen6 },
754
755 { "3DSTATE_VIEWPORT_STATE_POINTERS_CC", decode_3dstate_viewport_state_pointers_cc },
756 { "3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP", decode_3dstate_viewport_state_pointers_sf_clip },
757 { "3DSTATE_BLEND_STATE_POINTERS", decode_3dstate_blend_state_pointers },
758 { "3DSTATE_CC_STATE_POINTERS", decode_3dstate_cc_state_pointers },
759 { "3DSTATE_SCISSOR_STATE_POINTERS", decode_3dstate_scissor_state_pointers },
760 { "MI_LOAD_REGISTER_IMM", decode_load_register_imm }
761 };
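/* gen_print_batch() matches decoded instruction names against this table
 * and, when GEN_BATCH_DECODE_FULL is set, calls the matching handler after
 * printing the instruction's fields.
 */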
762
763 static inline uint64_t
764 get_address(struct gen_spec *spec, const uint32_t *p)
765 {
766 /* Addresses are always guaranteed to be page-aligned and sometimes
767 * hardware packets have extra stuff stuffed in the bottom 12 bits.
768 */
769 uint64_t addr = p[0] & ~0xfffu;
770
771 if (gen_spec_get_gen(spec) >= gen_make_gen(8,0)) {
772 /* On Broadwell and above, we have 48-bit addresses which consume two
773 * dwords. Some packets require that these get stored in a "canonical
774 * form" which means that bit 47 is sign-extended through the upper
775 * bits. In order to correctly handle those aub dumps, we need to mask
776 * off the top 16 bits.
777 */
778 addr |= ((uint64_t)p[1] & 0xffff) << 32;
779 }
780
781 return addr;
782 }
783
784 void
785 gen_print_batch(struct gen_batch_decode_ctx *ctx,
786 const uint32_t *batch, uint32_t batch_size,
787 uint64_t batch_addr)
788 {
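/* Note: batch_size is a count of dwords, not bytes -- "end" below is
 * computed with uint32_t pointer arithmetic.
 */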
789 const uint32_t *p, *end = batch + batch_size;
790 int length;
791 struct gen_group *inst;
792
793 for (p = batch; p < end; p += length) {
794 inst = gen_spec_find_instruction(ctx->spec, p);
795 length = gen_group_get_length(inst, p);
796 assert(inst == NULL || length > 0);
797 length = MAX2(1, length);
798
799 const char *reset_color = ctx->flags & GEN_BATCH_DECODE_IN_COLOR ? NORMAL : "";
800
801 uint64_t offset;
802 if (ctx->flags & GEN_BATCH_DECODE_OFFSETS)
803 offset = batch_addr + ((char *)p - (char *)batch);
804 else
805 offset = 0;
806
807 if (inst == NULL) {
808 fprintf(ctx->fp, "%s0x%08"PRIx64": unknown instruction %08x%s\n",
809 (ctx->flags & GEN_BATCH_DECODE_IN_COLOR) ? RED_COLOR : "",
810 offset, p[0], reset_color);
811 continue;
812 }
813
814 const char *color;
815 const char *inst_name = gen_group_get_name(inst);
816 if (ctx->flags & GEN_BATCH_DECODE_IN_COLOR) {
817 reset_color = NORMAL;
818 if (ctx->flags & GEN_BATCH_DECODE_FULL) {
819 if (strcmp(inst_name, "MI_BATCH_BUFFER_START") == 0 ||
820 strcmp(inst_name, "MI_BATCH_BUFFER_END") == 0)
821 color = GREEN_HEADER;
822 else
823 color = BLUE_HEADER;
824 } else {
825 color = NORMAL;
826 }
827 } else {
828 color = "";
829 reset_color = "";
830 }
831
832 fprintf(ctx->fp, "%s0x%08"PRIx64": 0x%08x: %-80s%s\n",
833 color, offset, p[0], inst_name, reset_color);
834
835 if (ctx->flags & GEN_BATCH_DECODE_FULL) {
836 ctx_print_group(ctx, inst, offset, p);
837
838 for (int i = 0; i < ARRAY_LENGTH(custom_decoders); i++) {
839 if (strcmp(inst_name, custom_decoders[i].cmd_name) == 0) {
840 custom_decoders[i].decode(ctx, p);
841 break;
842 }
843 }
844 }
845
846 if (strcmp(inst_name, "MI_BATCH_BUFFER_START") == 0) {
847 struct gen_batch_decode_bo next_batch = {};
848 bool second_level = false;
849 struct gen_field_iterator iter;
850 gen_field_iterator_init(&iter, inst, p, 0, false);
851 while (gen_field_iterator_next(&iter)) {
852 if (strcmp(iter.name, "Batch Buffer Start Address") == 0) {
853 next_batch = ctx_get_bo(ctx, iter.raw_value);
854 } else if (strcmp(iter.name, "Second Level Batch Buffer") == 0) {
855 second_level = iter.raw_value;
856 }
857 }
858
859 if (next_batch.map == NULL) {
860 fprintf(ctx->fp, "Secondary batch at 0x%08"PRIx64" unavailable\n",
861 next_batch.addr);
862 } else {
863 gen_print_batch(ctx, next_batch.map, next_batch.size,
864 next_batch.addr);
865 }
866 if (second_level) {
867 /* MI_BATCH_BUFFER_START with "2nd Level Batch Buffer" set acts
868 * like a subroutine call. Commands that come afterwards get
869 * processed once the 2nd level batch buffer returns with
870 * MI_BATCH_BUFFER_END.
871 */
872 continue;
873 } else {
874 /* MI_BATCH_BUFFER_START with "2nd Level Batch Buffer" unset acts
875 * like a goto. Nothing after it in this batch will ever get
876 * processed, and the jump target has already been decoded by the
877 * recursive call above, so stop decoding here.
878 */
879 break;
880 }
881 } else if (strcmp(inst_name, "MI_BATCH_BUFFER_END") == 0) {
882 break;
883 }
884 }
885 }