intel: Fix 3DSTATE_CONSTANT buffer decoding.
[mesa.git] / src / intel / common / gen_batch_decoder.c
1 /*
2 * Copyright © 2017 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "common/gen_decoder.h"
25 #include "gen_disasm.h"
26
27 #include <string.h>
28
29 void
30 gen_batch_decode_ctx_init(struct gen_batch_decode_ctx *ctx,
31 const struct gen_device_info *devinfo,
32 FILE *fp, enum gen_batch_decode_flags flags,
33 const char *xml_path,
34 struct gen_batch_decode_bo (*get_bo)(void *,
35 uint64_t),
36 unsigned (*get_state_size)(void *, uint32_t),
37 void *user_data)
38 {
39 memset(ctx, 0, sizeof(*ctx));
40
41 ctx->get_bo = get_bo;
42 ctx->get_state_size = get_state_size;
43 ctx->user_data = user_data;
44 ctx->fp = fp;
45 ctx->flags = flags;
46
47 if (xml_path == NULL)
48 ctx->spec = gen_spec_load(devinfo);
49 else
50 ctx->spec = gen_spec_load_from_path(devinfo, xml_path);
51 ctx->disasm = gen_disasm_create(devinfo);
52 }
53
/* Release the spec and disassembler owned by the context.  The FILE* and
 * callback user_data are caller-owned and are not touched here.
 */
void
gen_batch_decode_ctx_finish(struct gen_batch_decode_ctx *ctx)
{
   gen_spec_destroy(ctx->spec);
   gen_disasm_destroy(ctx->disasm);
}
60
/* ANSI terminal escape sequences used for colored output.  Note "\e" is a
 * GNU C extension for the ESC character (0x1b).
 */
#define CSI "\e["
#define RED_COLOR CSI "31m"
#define BLUE_HEADER CSI "0;44m"
#define GREEN_HEADER CSI "1;42m"
#define NORMAL CSI "0m"

/* Element count of a true array (not valid on pointers/parameters). */
#define ARRAY_LENGTH(a) (sizeof (a) / sizeof (a)[0])
68
/* Thin wrapper around gen_print_group() that honors the context's
 * GEN_BATCH_DECODE_IN_COLOR flag and output stream.
 */
static void
ctx_print_group(struct gen_batch_decode_ctx *ctx,
                struct gen_group *group,
                uint64_t address, const void *map)
{
   gen_print_group(ctx->fp, group, address, map, 0,
                   (ctx->flags & GEN_BATCH_DECODE_IN_COLOR) != 0);
}
77
78 static struct gen_batch_decode_bo
79 ctx_get_bo(struct gen_batch_decode_ctx *ctx, uint64_t addr)
80 {
81 if (gen_spec_get_gen(ctx->spec) >= gen_make_gen(8,0)) {
82 /* On Broadwell and above, we have 48-bit addresses which consume two
83 * dwords. Some packets require that these get stored in a "canonical
84 * form" which means that bit 47 is sign-extended through the upper
85 * bits. In order to correctly handle those aub dumps, we need to mask
86 * off the top 16 bits.
87 */
88 addr &= (~0ull >> 16);
89 }
90
91 struct gen_batch_decode_bo bo = ctx->get_bo(ctx->user_data, addr);
92
93 if (gen_spec_get_gen(ctx->spec) >= gen_make_gen(8,0))
94 bo.addr &= (~0ull >> 16);
95
96 /* We may actually have an offset into the bo */
97 if (bo.map != NULL) {
98 assert(bo.addr <= addr);
99 uint64_t offset = addr - bo.addr;
100 bo.map += offset;
101 bo.addr += offset;
102 bo.size -= offset;
103 }
104
105 return bo;
106 }
107
108 static int
109 update_count(struct gen_batch_decode_ctx *ctx,
110 uint32_t offset_from_dsba,
111 unsigned element_dwords,
112 unsigned guess)
113 {
114 unsigned size = 0;
115
116 if (ctx->get_state_size)
117 size = ctx->get_state_size(ctx->user_data, offset_from_dsba);
118
119 if (size > 0)
120 return size / (sizeof(uint32_t) * element_dwords);
121
122 /* In the absence of any information, just guess arbitrarily. */
123 return guess;
124 }
125
126 static void
127 ctx_disassemble_program(struct gen_batch_decode_ctx *ctx,
128 uint32_t ksp, const char *type)
129 {
130 if (!ctx->instruction_base.map)
131 return;
132
133 printf("\nReferenced %s:\n", type);
134 gen_disasm_disassemble(ctx->disasm,
135 (void *)ctx->instruction_base.map, ksp,
136 ctx->fp);
137 }
138
/* Heuristic to determine whether a uint32_t is probably actually a float
 * (http://stackoverflow.com/a/2953466)
 */
static bool
probably_float(uint32_t bits)
{
   const int exponent = (int)((bits >> 23) & 0xff) - 127;
   const uint32_t mantissa = bits & 0x007fffffu;

   /* Exact +-0.0 */
   if (exponent == -127 && mantissa == 0)
      return true;

   /* Magnitudes roughly between one billionth and one billion */
   if (exponent >= -30 && exponent <= 30)
      return true;

   /* "Round" values: the low 16 mantissa bits are all clear */
   return (mantissa & 0xffffu) == 0;
}
163
/* Dump the first "read_length" bytes of a mapped buffer as dwords, either
 * in hex or (with GEN_BATCH_DECODE_FLOATS) as floats when the bit pattern
 * looks float-like.  Lines wrap after "pitch" bytes or after 8 dwords,
 * whichever comes first; callers pass pitch 0 to get pure 8-dword rows.
 */
static void
ctx_print_buffer(struct gen_batch_decode_ctx *ctx,
                 struct gen_batch_decode_bo bo,
                 uint32_t read_length,
                 uint32_t pitch)
{
   /* Clamp to the mapped size so we never read past the mapping. */
   const uint32_t *dw_end = bo.map + MIN2(bo.size, read_length);

   unsigned line_count = 0;
   for (const uint32_t *dw = bo.map; dw < dw_end; dw++) {
      /* line_count counts dwords, pitch is in bytes -- hence the * 4. */
      if (line_count * 4 == pitch || line_count == 8) {
         fprintf(ctx->fp, "\n");
         line_count = 0;
      }
      fprintf(ctx->fp, line_count == 0 ? " " : " ");

      if ((ctx->flags & GEN_BATCH_DECODE_FLOATS) && probably_float(*dw))
         fprintf(ctx->fp, " %8.2f", *(float *) dw);
      else
         fprintf(ctx->fp, " 0x%08x", *dw);

      line_count++;
   }
   fprintf(ctx->fp, "\n");
}
189
190 static void
191 handle_state_base_address(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
192 {
193 struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
194
195 struct gen_field_iterator iter;
196 gen_field_iterator_init(&iter, inst, p, 0, false);
197
198 while (gen_field_iterator_next(&iter)) {
199 if (strcmp(iter.name, "Surface State Base Address") == 0) {
200 ctx->surface_base = ctx_get_bo(ctx, iter.raw_value);
201 } else if (strcmp(iter.name, "Dynamic State Base Address") == 0) {
202 ctx->dynamic_base = ctx_get_bo(ctx, iter.raw_value);
203 } else if (strcmp(iter.name, "Instruction Base Address") == 0) {
204 ctx->instruction_base = ctx_get_bo(ctx, iter.raw_value);
205 }
206 }
207 }
208
/* Print "count" RENDER_SURFACE_STATE entries referenced by a binding table
 * located "offset" bytes from Surface State Base Address.  A negative
 * count means "unknown" -- ask update_count() (guessing 8 entries when no
 * size information is tracked).
 */
static void
dump_binding_table(struct gen_batch_decode_ctx *ctx, uint32_t offset, int count)
{
   struct gen_group *strct =
      gen_spec_find_struct(ctx->spec, "RENDER_SURFACE_STATE");
   if (strct == NULL) {
      fprintf(ctx->fp, "did not find RENDER_SURFACE_STATE info\n");
      return;
   }

   if (count < 0)
      count = update_count(ctx, offset, 1, 8);

   if (ctx->surface_base.map == NULL) {
      fprintf(ctx->fp, "  binding table unavailable\n");
      return;
   }

   /* Binding table pointers are 32-byte aligned 16-bit offsets and must
    * land inside the surface state BO.
    */
   if (offset % 32 != 0 || offset >= UINT16_MAX ||
       offset >= ctx->surface_base.size) {
      fprintf(ctx->fp, "  invalid binding table pointer\n");
      return;
   }

   /* Each entry is a dword offset (relative to surface base) to a
    * RENDER_SURFACE_STATE; 0 marks an unused slot.
    */
   const uint32_t *pointers = ctx->surface_base.map + offset;
   for (int i = 0; i < count; i++) {
      if (pointers[i] == 0)
         continue;

      if (pointers[i] % 32 != 0 ||
          (pointers[i] + strct->dw_length * 4) >= ctx->surface_base.size) {
         fprintf(ctx->fp, "pointer %u: %08x <not valid>\n", i, pointers[i]);
         continue;
      }

      fprintf(ctx->fp, "pointer %u: %08x\n", i, pointers[i]);
      ctx_print_group(ctx, strct, ctx->surface_base.addr + pointers[i],
                      ctx->surface_base.map + pointers[i]);
   }
}
249
250 static void
251 dump_samplers(struct gen_batch_decode_ctx *ctx, uint32_t offset, int count)
252 {
253 struct gen_group *strct = gen_spec_find_struct(ctx->spec, "SAMPLER_STATE");
254
255 if (count < 0)
256 count = update_count(ctx, offset, strct->dw_length, 4);
257
258 if (ctx->dynamic_base.map == NULL) {
259 fprintf(ctx->fp, " samplers unavailable\n");
260 return;
261 }
262
263 if (offset % 32 != 0 || offset >= ctx->dynamic_base.size) {
264 fprintf(ctx->fp, " invalid sampler state pointer\n");
265 return;
266 }
267
268 uint64_t state_addr = ctx->dynamic_base.addr + offset;
269 const void *state_map = ctx->dynamic_base.map + offset;
270 for (int i = 0; i < count; i++) {
271 fprintf(ctx->fp, "sampler state %d\n", i);
272 ctx_print_group(ctx, strct, state_addr, state_map);
273 state_addr += 16;
274 state_map += 16;
275 }
276 }
277
278 static void
279 handle_media_interface_descriptor_load(struct gen_batch_decode_ctx *ctx,
280 const uint32_t *p)
281 {
282 if (ctx->dynamic_base.map == NULL)
283 return;
284
285 struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
286 struct gen_group *desc =
287 gen_spec_find_struct(ctx->spec, "INTERFACE_DESCRIPTOR_DATA");
288
289 struct gen_field_iterator iter;
290 gen_field_iterator_init(&iter, inst, p, 0, false);
291 uint32_t descriptor_offset = 0;
292 int descriptor_count = 0;
293 while (gen_field_iterator_next(&iter)) {
294 if (strcmp(iter.name, "Interface Descriptor Data Start Address") == 0) {
295 descriptor_offset = strtol(iter.value, NULL, 16);
296 } else if (strcmp(iter.name, "Interface Descriptor Total Length") == 0) {
297 descriptor_count =
298 strtol(iter.value, NULL, 16) / (desc->dw_length * 4);
299 }
300 }
301
302 uint64_t desc_addr = ctx->dynamic_base.addr + descriptor_offset;
303 const uint32_t *desc_map = ctx->dynamic_base.map + descriptor_offset;
304 for (int i = 0; i < descriptor_count; i++) {
305 fprintf(ctx->fp, "descriptor %d: %08x\n", i, descriptor_offset);
306
307 ctx_print_group(ctx, desc, desc_addr, desc_map);
308
309 gen_field_iterator_init(&iter, desc, desc_map, 0, false);
310 uint64_t ksp;
311 uint32_t sampler_offset, sampler_count;
312 uint32_t binding_table_offset, binding_entry_count;
313 while (gen_field_iterator_next(&iter)) {
314 if (strcmp(iter.name, "Kernel Start Pointer") == 0) {
315 ksp = strtoll(iter.value, NULL, 16);
316 } else if (strcmp(iter.name, "Sampler State Pointer") == 0) {
317 sampler_offset = strtol(iter.value, NULL, 16);
318 } else if (strcmp(iter.name, "Sampler Count") == 0) {
319 sampler_count = strtol(iter.value, NULL, 10);
320 } else if (strcmp(iter.name, "Binding Table Pointer") == 0) {
321 binding_table_offset = strtol(iter.value, NULL, 16);
322 } else if (strcmp(iter.name, "Binding Table Entry Count") == 0) {
323 binding_entry_count = strtol(iter.value, NULL, 10);
324 }
325 }
326
327 ctx_disassemble_program(ctx, ksp, "compute shader");
328 printf("\n");
329
330 dump_samplers(ctx, sampler_offset, sampler_count);
331 dump_binding_table(ctx, binding_table_offset, binding_entry_count);
332
333 desc_map += desc->dw_length;
334 desc_addr += desc->dw_length * 4;
335 }
336 }
337
/* 3DSTATE_VERTEX_BUFFERS decoder: for each VERTEX_BUFFER_STATE element in
 * the packet, print its index/size and hex-dump the buffer contents.
 *
 * The size comes from either a "Buffer Size" field or, on generations
 * that instead encode an "End Address", from end - start; "ready" flips
 * true once whichever of those two fields applies has been seen, at which
 * point one buffer is dumped and the accumulators reset.
 */
static void
handle_3dstate_vertex_buffers(struct gen_batch_decode_ctx *ctx,
                              const uint32_t *p)
{
   struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
   struct gen_group *vbs = gen_spec_find_struct(ctx->spec, "VERTEX_BUFFER_STATE");

   struct gen_batch_decode_bo vb = {};
   uint32_t vb_size = 0;
   int index = -1;
   int pitch = -1;
   bool ready = false;

   struct gen_field_iterator iter;
   gen_field_iterator_init(&iter, inst, p, 0, false);
   while (gen_field_iterator_next(&iter)) {
      /* Only descend into the VERTEX_BUFFER_STATE sub-structures. */
      if (iter.struct_desc != vbs)
         continue;

      struct gen_field_iterator vbs_iter;
      gen_field_iterator_init(&vbs_iter, vbs, &iter.p[iter.start_bit / 32], 0, false);
      while (gen_field_iterator_next(&vbs_iter)) {
         if (strcmp(vbs_iter.name, "Vertex Buffer Index") == 0) {
            index = vbs_iter.raw_value;
         } else if (strcmp(vbs_iter.name, "Buffer Pitch") == 0) {
            pitch = vbs_iter.raw_value;
         } else if (strcmp(vbs_iter.name, "Buffer Starting Address") == 0) {
            vb = ctx_get_bo(ctx, vbs_iter.raw_value);
         } else if (strcmp(vbs_iter.name, "Buffer Size") == 0) {
            vb_size = vbs_iter.raw_value;
            ready = true;
         } else if (strcmp(vbs_iter.name, "End Address") == 0) {
            /* Older encoding: derive the size from the end address. */
            if (vb.map && vbs_iter.raw_value >= vb.addr)
               vb_size = vbs_iter.raw_value - vb.addr;
            else
               vb_size = 0;
            ready = true;
         }

         if (!ready)
            continue;

         fprintf(ctx->fp, "vertex buffer %d, size %d\n", index, vb_size);

         if (vb.map == NULL) {
            fprintf(ctx->fp, "  buffer contents unavailable\n");
            continue;
         }

         if (vb.map == 0 || vb_size == 0)
            continue;

         ctx_print_buffer(ctx, vb, vb_size, pitch);

         /* Reset the accumulators for the next VERTEX_BUFFER_STATE. */
         vb.map = NULL;
         vb_size = 0;
         index = -1;
         pitch = -1;
         ready = false;
      }
   }
}
400
/* 3DSTATE_INDEX_BUFFER decoder: print the first few indices of the index
 * buffer.  "Index Format" selects the element width: 0 = byte, 1 = word,
 * 2 = dword (matching the decode switch below).  At most 10 indices are
 * printed, with "..." when the buffer continues.
 */
static void
handle_3dstate_index_buffer(struct gen_batch_decode_ctx *ctx,
                            const uint32_t *p)
{
   struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);

   struct gen_batch_decode_bo ib = {};
   uint32_t ib_size = 0;
   uint32_t format = 0;

   struct gen_field_iterator iter;
   gen_field_iterator_init(&iter, inst, p, 0, false);
   while (gen_field_iterator_next(&iter)) {
      if (strcmp(iter.name, "Index Format") == 0) {
         format = iter.raw_value;
      } else if (strcmp(iter.name, "Buffer Starting Address") == 0) {
         ib = ctx_get_bo(ctx, iter.raw_value);
      } else if (strcmp(iter.name, "Buffer Size") == 0) {
         ib_size = iter.raw_value;
      }
   }

   if (ib.map == NULL) {
      fprintf(ctx->fp, "  buffer contents unavailable\n");
      return;
   }

   /* Clamp to the mapped size so we never read past the mapping. */
   const void *m = ib.map;
   const void *ib_end = ib.map + MIN2(ib.size, ib_size);
   for (int i = 0; m < ib_end && i < 10; i++) {
      switch (format) {
      case 0:
         fprintf(ctx->fp, "%3d ", *(uint8_t *)m);
         m += 1;
         break;
      case 1:
         fprintf(ctx->fp, "%3d ", *(uint16_t *)m);
         m += 2;
         break;
      case 2:
         fprintf(ctx->fp, "%3d ", *(uint32_t *)m);
         m += 4;
         break;
      }
   }

   if (m < ib_end)
      fprintf(ctx->fp, "...");
   fprintf(ctx->fp, "\n");
}
451
452 static void
453 decode_single_ksp(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
454 {
455 struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
456
457 uint64_t ksp = 0;
458 bool is_simd8 = false; /* vertex shaders on Gen8+ only */
459 bool is_enabled = true;
460
461 struct gen_field_iterator iter;
462 gen_field_iterator_init(&iter, inst, p, 0, false);
463 while (gen_field_iterator_next(&iter)) {
464 if (strcmp(iter.name, "Kernel Start Pointer") == 0) {
465 ksp = iter.raw_value;
466 } else if (strcmp(iter.name, "SIMD8 Dispatch Enable") == 0) {
467 is_simd8 = iter.raw_value;
468 } else if (strcmp(iter.name, "Dispatch Mode") == 0) {
469 is_simd8 = strcmp(iter.value, "SIMD8") == 0;
470 } else if (strcmp(iter.name, "Dispatch Enable") == 0) {
471 is_simd8 = strcmp(iter.value, "SIMD8") == 0;
472 } else if (strcmp(iter.name, "Enable") == 0) {
473 is_enabled = iter.raw_value;
474 }
475 }
476
477 const char *type =
478 strcmp(inst->name, "VS_STATE") == 0 ? "vertex shader" :
479 strcmp(inst->name, "GS_STATE") == 0 ? "geometry shader" :
480 strcmp(inst->name, "SF_STATE") == 0 ? "strips and fans shader" :
481 strcmp(inst->name, "CLIP_STATE") == 0 ? "clip shader" :
482 strcmp(inst->name, "3DSTATE_DS") == 0 ? "tessellation evaluation shader" :
483 strcmp(inst->name, "3DSTATE_HS") == 0 ? "tessellation control shader" :
484 strcmp(inst->name, "3DSTATE_VS") == 0 ? (is_simd8 ? "SIMD8 vertex shader" : "vec4 vertex shader") :
485 strcmp(inst->name, "3DSTATE_GS") == 0 ? (is_simd8 ? "SIMD8 geometry shader" : "vec4 geometry shader") :
486 NULL;
487
488 if (is_enabled) {
489 ctx_disassemble_program(ctx, ksp, type);
490 printf("\n");
491 }
492 }
493
/* 3DSTATE_PS decoder: disassemble the enabled pixel shader kernels.
 *
 * The hardware stores up to three Kernel Start Pointers whose meaning
 * depends on which dispatch widths are enabled; the shuffle below
 * normalizes them into ksp[0..2] = [SIMD8, SIMD16, SIMD32].
 */
static void
decode_ps_kernels(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
{
   struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);

   uint64_t ksp[3] = {0, 0, 0};
   bool enabled[3] = {false, false, false};

   struct gen_field_iterator iter;
   gen_field_iterator_init(&iter, inst, p, 0, false);
   while (gen_field_iterator_next(&iter)) {
      /* Fields are named "Kernel Start Pointer 0/1/2"; the trailing digit
       * picks the slot.
       */
      if (strncmp(iter.name, "Kernel Start Pointer ",
                  strlen("Kernel Start Pointer ")) == 0) {
         int idx = iter.name[strlen("Kernel Start Pointer ")] - '0';
         ksp[idx] = strtol(iter.value, NULL, 16);
      } else if (strcmp(iter.name, "8 Pixel Dispatch Enable") == 0) {
         enabled[0] = strcmp(iter.value, "true") == 0;
      } else if (strcmp(iter.name, "16 Pixel Dispatch Enable") == 0) {
         enabled[1] = strcmp(iter.value, "true") == 0;
      } else if (strcmp(iter.name, "32 Pixel Dispatch Enable") == 0) {
         enabled[2] = strcmp(iter.value, "true") == 0;
      }
   }

   /* Reorder KSPs to be [8, 16, 32] instead of the hardware order. */
   if (enabled[0] + enabled[1] + enabled[2] == 1) {
      /* Exactly one width enabled: its kernel lives in slot 0. */
      if (enabled[1]) {
         ksp[1] = ksp[0];
         ksp[0] = 0;
      } else if (enabled[2]) {
         ksp[2] = ksp[0];
         ksp[0] = 0;
      }
   } else {
      /* Multiple widths: hardware swaps the 16 and 32 wide slots. */
      uint64_t tmp = ksp[1];
      ksp[1] = ksp[2];
      ksp[2] = tmp;
   }

   if (enabled[0])
      ctx_disassemble_program(ctx, ksp[0], "SIMD8 fragment shader");
   if (enabled[1])
      ctx_disassemble_program(ctx, ksp[1], "SIMD16 fragment shader");
   if (enabled[2])
      ctx_disassemble_program(ctx, ksp[2], "SIMD32 fragment shader");
   fprintf(ctx->fp, "\n");
}
541
542 static void
543 decode_3dstate_constant(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
544 {
545 struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
546 struct gen_group *body =
547 gen_spec_find_struct(ctx->spec, "3DSTATE_CONSTANT_BODY");
548
549 uint32_t read_length[4];
550 struct gen_batch_decode_bo buffer[4];
551 memset(buffer, 0, sizeof(buffer));
552
553 struct gen_field_iterator outer;
554 gen_field_iterator_init(&outer, inst, p, 0, false);
555 while (gen_field_iterator_next(&outer)) {
556 if (outer.struct_desc != body)
557 continue;
558
559 struct gen_field_iterator iter;
560 gen_field_iterator_init(&iter, body, &outer.p[outer.start_bit / 32],
561 0, false);
562
563 while (gen_field_iterator_next(&iter)) {
564 int idx;
565 if (sscanf(iter.name, "Read Length[%d]", &idx) == 1) {
566 read_length[idx] = iter.raw_value;
567 } else if (sscanf(iter.name, "Buffer[%d]", &idx) == 1) {
568 buffer[idx] = ctx_get_bo(ctx, iter.raw_value);
569 }
570 }
571
572 for (int i = 0; i < 4; i++) {
573 if (read_length[i] == 0 || buffer[i].map == NULL)
574 continue;
575
576 unsigned size = read_length[i] * 32;
577 fprintf(ctx->fp, "constant buffer %d, size %u\n", i, size);
578
579 ctx_print_buffer(ctx, buffer[i], size, 0);
580 }
581 }
582 }
583
/* 3DSTATE_BINDING_TABLE_POINTERS_*: dword 1 holds the binding table
 * offset; count -1 lets dump_binding_table() determine the entry count.
 */
static void
decode_3dstate_binding_table_pointers(struct gen_batch_decode_ctx *ctx,
                                      const uint32_t *p)
{
   dump_binding_table(ctx, p[1], -1);
}
590
/* 3DSTATE_SAMPLER_STATE_POINTERS_*: dword 1 holds the sampler state
 * offset; count -1 lets dump_samplers() determine the entry count.
 */
static void
decode_3dstate_sampler_state_pointers(struct gen_batch_decode_ctx *ctx,
                                      const uint32_t *p)
{
   dump_samplers(ctx, p[1], -1);
}
597
/* Gen6 3DSTATE_SAMPLER_STATE_POINTERS carries three sampler state offsets
 * in dwords 1-3 (one per shader stage -- presumably VS/GS/PS order; verify
 * against the PRM).
 */
static void
decode_3dstate_sampler_state_pointers_gen6(struct gen_batch_decode_ctx *ctx,
                                           const uint32_t *p)
{
   dump_samplers(ctx, p[1], -1);
   dump_samplers(ctx, p[2], -1);
   dump_samplers(ctx, p[3], -1);
}
606
/* Return true when "str" ends with the suffix "end".
 *
 * Fix: the old code computed strlen(str) - strlen(end) in size_t (which
 * wraps when the suffix is longer) and narrowed it to int, relying on
 * implementation-defined conversion to produce a negative value.  Compare
 * the lengths directly in size_t instead.
 */
static bool
str_ends_with(const char *str, const char *end)
{
   size_t str_len = strlen(str);
   size_t end_len = strlen(end);

   if (end_len > str_len)
      return false;

   return strcmp(str + (str_len - end_len), end) == 0;
}
616
617 static void
618 decode_dynamic_state_pointers(struct gen_batch_decode_ctx *ctx,
619 const char *struct_type, const uint32_t *p,
620 int count)
621 {
622 if (ctx->dynamic_base.map == NULL) {
623 fprintf(ctx->fp, " dynamic %s state unavailable\n", struct_type);
624 return;
625 }
626
627 struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
628 struct gen_group *state = gen_spec_find_struct(ctx->spec, struct_type);
629
630 uint32_t state_offset;
631
632 struct gen_field_iterator iter;
633 gen_field_iterator_init(&iter, inst, p, 0, false);
634 while (gen_field_iterator_next(&iter)) {
635 if (str_ends_with(iter.name, "Pointer")) {
636 state_offset = iter.raw_value;
637 break;
638 }
639 }
640
641 uint32_t state_addr = ctx->dynamic_base.addr + state_offset;
642 const uint32_t *state_map = ctx->dynamic_base.map + state_offset;
643 for (int i = 0; i < count; i++) {
644 fprintf(ctx->fp, "%s %d\n", struct_type, i);
645 ctx_print_group(ctx, state, state_offset, state_map);
646
647 state_addr += state->dw_length * 4;
648 state_map += state->dw_length;
649 }
650 }
651
/* 3DSTATE_VIEWPORT_STATE_POINTERS_CC: dump 4 CC_VIEWPORT structs at the
 * pointer carried by this packet.
 */
static void
decode_3dstate_viewport_state_pointers_cc(struct gen_batch_decode_ctx *ctx,
                                          const uint32_t *p)
{
   decode_dynamic_state_pointers(ctx, "CC_VIEWPORT", p, 4);
}
658
/* 3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP: dump 4 SF_CLIP_VIEWPORT structs
 * at the pointer carried by this packet.
 */
static void
decode_3dstate_viewport_state_pointers_sf_clip(struct gen_batch_decode_ctx *ctx,
                                               const uint32_t *p)
{
   decode_dynamic_state_pointers(ctx, "SF_CLIP_VIEWPORT", p, 4);
}
665
/* 3DSTATE_BLEND_STATE_POINTERS: dump the single BLEND_STATE struct. */
static void
decode_3dstate_blend_state_pointers(struct gen_batch_decode_ctx *ctx,
                                    const uint32_t *p)
{
   decode_dynamic_state_pointers(ctx, "BLEND_STATE", p, 1);
}
672
/* 3DSTATE_CC_STATE_POINTERS: dump the single COLOR_CALC_STATE struct. */
static void
decode_3dstate_cc_state_pointers(struct gen_batch_decode_ctx *ctx,
                                 const uint32_t *p)
{
   decode_dynamic_state_pointers(ctx, "COLOR_CALC_STATE", p, 1);
}
679
/* 3DSTATE_SCISSOR_STATE_POINTERS: dump the single SCISSOR_RECT struct. */
static void
decode_3dstate_scissor_state_pointers(struct gen_batch_decode_ctx *ctx,
                                      const uint32_t *p)
{
   decode_dynamic_state_pointers(ctx, "SCISSOR_RECT", p, 1);
}
686
687 static void
688 decode_load_register_imm(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
689 {
690 struct gen_group *reg = gen_spec_find_register(ctx->spec, p[1]);
691
692 if (reg != NULL) {
693 fprintf(ctx->fp, "register %s (0x%x): 0x%x\n",
694 reg->name, reg->register_offset, p[2]);
695 ctx_print_group(ctx, reg, reg->register_offset, &p[2]);
696 }
697 }
698
/* Dispatch table mapping instruction names to custom decode callbacks.
 * Scanned linearly by gen_print_batch() after the generic field printout
 * when GEN_BATCH_DECODE_FULL is set.
 */
struct custom_decoder {
   const char *cmd_name;  /* instruction name to match exactly */
   void (*decode)(struct gen_batch_decode_ctx *ctx, const uint32_t *p);
} custom_decoders[] = {
   { "STATE_BASE_ADDRESS", handle_state_base_address },
   { "MEDIA_INTERFACE_DESCRIPTOR_LOAD", handle_media_interface_descriptor_load },
   { "3DSTATE_VERTEX_BUFFERS", handle_3dstate_vertex_buffers },
   { "3DSTATE_INDEX_BUFFER", handle_3dstate_index_buffer },
   { "3DSTATE_VS", decode_single_ksp },
   { "3DSTATE_GS", decode_single_ksp },
   { "3DSTATE_DS", decode_single_ksp },
   { "3DSTATE_HS", decode_single_ksp },
   { "3DSTATE_PS", decode_ps_kernels },
   { "3DSTATE_CONSTANT_VS", decode_3dstate_constant },
   { "3DSTATE_CONSTANT_GS", decode_3dstate_constant },
   { "3DSTATE_CONSTANT_PS", decode_3dstate_constant },
   { "3DSTATE_CONSTANT_HS", decode_3dstate_constant },
   { "3DSTATE_CONSTANT_DS", decode_3dstate_constant },

   { "3DSTATE_BINDING_TABLE_POINTERS_VS", decode_3dstate_binding_table_pointers },
   { "3DSTATE_BINDING_TABLE_POINTERS_HS", decode_3dstate_binding_table_pointers },
   { "3DSTATE_BINDING_TABLE_POINTERS_DS", decode_3dstate_binding_table_pointers },
   { "3DSTATE_BINDING_TABLE_POINTERS_GS", decode_3dstate_binding_table_pointers },
   { "3DSTATE_BINDING_TABLE_POINTERS_PS", decode_3dstate_binding_table_pointers },

   { "3DSTATE_SAMPLER_STATE_POINTERS_VS", decode_3dstate_sampler_state_pointers },
   { "3DSTATE_SAMPLER_STATE_POINTERS_HS", decode_3dstate_sampler_state_pointers },
   { "3DSTATE_SAMPLER_STATE_POINTERS_DS", decode_3dstate_sampler_state_pointers },
   { "3DSTATE_SAMPLER_STATE_POINTERS_GS", decode_3dstate_sampler_state_pointers },
   { "3DSTATE_SAMPLER_STATE_POINTERS_PS", decode_3dstate_sampler_state_pointers },
   { "3DSTATE_SAMPLER_STATE_POINTERS", decode_3dstate_sampler_state_pointers_gen6 },

   { "3DSTATE_VIEWPORT_STATE_POINTERS_CC", decode_3dstate_viewport_state_pointers_cc },
   { "3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP", decode_3dstate_viewport_state_pointers_sf_clip },
   { "3DSTATE_BLEND_STATE_POINTERS", decode_3dstate_blend_state_pointers },
   { "3DSTATE_CC_STATE_POINTERS", decode_3dstate_cc_state_pointers },
   { "3DSTATE_SCISSOR_STATE_POINTERS", decode_3dstate_scissor_state_pointers },
   { "MI_LOAD_REGISTER_IMM", decode_load_register_imm }
};
738
/* Extract a GPU address starting at dword p[0].
 *
 * Addresses are page aligned, so the hardware reuses the low 12 bits for
 * flags; mask them off.  On Gen8+ the address is 48 bits wide across two
 * dwords, and only the low 16 bits of the high dword are real address --
 * the rest is canonical-form sign extension that must be dropped.
 */
static inline uint64_t
get_address(struct gen_spec *spec, const uint32_t *p)
{
   uint64_t addr = p[0] & ~(uint64_t)0xfff;

   if (gen_spec_get_gen(spec) >= gen_make_gen(8, 0))
      addr |= (uint64_t)(p[1] & 0xffff) << 32;

   return addr;
}
759
760 void
761 gen_print_batch(struct gen_batch_decode_ctx *ctx,
762 const uint32_t *batch, uint32_t batch_size,
763 uint64_t batch_addr)
764 {
765 const uint32_t *p, *end = batch + batch_size;
766 int length;
767 struct gen_group *inst;
768
769 for (p = batch; p < end; p += length) {
770 inst = gen_spec_find_instruction(ctx->spec, p);
771 length = gen_group_get_length(inst, p);
772 assert(inst == NULL || length > 0);
773 length = MAX2(1, length);
774
775 const char *reset_color = ctx->flags & GEN_BATCH_DECODE_IN_COLOR ? NORMAL : "";
776
777 uint64_t offset;
778 if (ctx->flags & GEN_BATCH_DECODE_OFFSETS)
779 offset = batch_addr + ((char *)p - (char *)batch);
780 else
781 offset = 0;
782
783 if (inst == NULL) {
784 fprintf(ctx->fp, "%s0x%08"PRIx64": unknown instruction %08x%s\n",
785 (ctx->flags & GEN_BATCH_DECODE_IN_COLOR) ? RED_COLOR : "",
786 offset, p[0], reset_color);
787 continue;
788 }
789
790 const char *color;
791 const char *inst_name = gen_group_get_name(inst);
792 if (ctx->flags & GEN_BATCH_DECODE_IN_COLOR) {
793 reset_color = NORMAL;
794 if (ctx->flags & GEN_BATCH_DECODE_FULL) {
795 if (strcmp(inst_name, "MI_BATCH_BUFFER_START") == 0 ||
796 strcmp(inst_name, "MI_BATCH_BUFFER_END") == 0)
797 color = GREEN_HEADER;
798 else
799 color = BLUE_HEADER;
800 } else {
801 color = NORMAL;
802 }
803 } else {
804 color = "";
805 reset_color = "";
806 }
807
808 fprintf(ctx->fp, "%s0x%08"PRIx64": 0x%08x: %-80s%s\n",
809 color, offset, p[0], inst_name, reset_color);
810
811 if (ctx->flags & GEN_BATCH_DECODE_FULL) {
812 ctx_print_group(ctx, inst, offset, p);
813
814 for (int i = 0; i < ARRAY_LENGTH(custom_decoders); i++) {
815 if (strcmp(inst_name, custom_decoders[i].cmd_name) == 0) {
816 custom_decoders[i].decode(ctx, p);
817 break;
818 }
819 }
820 }
821
822 if (strcmp(inst_name, "MI_BATCH_BUFFER_START") == 0) {
823 struct gen_batch_decode_bo next_batch;
824 bool second_level;
825 struct gen_field_iterator iter;
826 gen_field_iterator_init(&iter, inst, p, 0, false);
827 while (gen_field_iterator_next(&iter)) {
828 if (strcmp(iter.name, "Batch Buffer Start Address") == 0) {
829 next_batch = ctx_get_bo(ctx, iter.raw_value);
830 } else if (strcmp(iter.name, "Second Level Batch Buffer") == 0) {
831 second_level = iter.raw_value;
832 }
833 }
834
835 if (next_batch.map == NULL) {
836 fprintf(ctx->fp, "Secondary batch at 0x%08"PRIx64" unavailable",
837 next_batch.addr);
838 }
839
840 if (second_level) {
841 /* MI_BATCH_BUFFER_START with "2nd Level Batch Buffer" set acts
842 * like a subroutine call. Commands that come afterwards get
843 * processed once the 2nd level batch buffer returns with
844 * MI_BATCH_BUFFER_END.
845 */
846 if (next_batch.map) {
847 gen_print_batch(ctx, next_batch.map, next_batch.size,
848 next_batch.addr);
849 }
850 } else {
851 /* MI_BATCH_BUFFER_START with "2nd Level Batch Buffer" unset acts
852 * like a goto. Nothing after it will ever get processed. In
853 * order to prevent the recursion from growing, we just reset the
854 * loop and continue;
855 */
856 if (next_batch.map) {
857 p = next_batch.map;
858 end = next_batch.map + next_batch.size;
859 length = 0;
860 continue;
861 } else {
862 /* Nothing we can do */
863 break;
864 }
865 }
866 } else if (strcmp(inst_name, "MI_BATCH_BUFFER_END") == 0) {
867 break;
868 }
869 }
870 }