/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "common/gen_decoder.h"
#include "gen_disasm.h"

#include <string.h>

void
gen_batch_decode_ctx_init(struct gen_batch_decode_ctx *ctx,
                          const struct gen_device_info *devinfo,
                          FILE *fp, enum gen_batch_decode_flags flags,
                          const char *xml_path,
                          struct gen_batch_decode_bo (*get_bo)(void *,
                                                               uint64_t),
                          void *user_data)
{
   memset(ctx, 0, sizeof(*ctx));

   ctx->get_bo = get_bo;
   ctx->user_data = user_data;
   ctx->fp = fp;
   ctx->flags = flags;

   if (xml_path == NULL)
      ctx->spec = gen_spec_load(devinfo);
   else
      ctx->spec = gen_spec_load_from_path(devinfo, xml_path);
   ctx->disasm = gen_disasm_create(devinfo);
}

void
gen_batch_decode_ctx_finish(struct gen_batch_decode_ctx *ctx)
{
   gen_spec_destroy(ctx->spec);
   gen_disasm_destroy(ctx->disasm);
}

#define CSI "\e["
#define RED_COLOR    CSI "31m"
#define BLUE_HEADER  CSI "0;44m"
#define GREEN_HEADER CSI "1;42m"
#define NORMAL       CSI "0m"

#define ARRAY_LENGTH(a) (sizeof (a) / sizeof (a)[0])

static void
ctx_print_group(struct gen_batch_decode_ctx *ctx,
                struct gen_group *group,
                uint64_t address, const void *map)
{
   gen_print_group(ctx->fp, group, address, map, 0,
                   (ctx->flags & GEN_BATCH_DECODE_IN_COLOR) != 0);
}

static struct gen_batch_decode_bo
ctx_get_bo(struct gen_batch_decode_ctx *ctx, uint64_t addr)
{
   if (gen_spec_get_gen(ctx->spec) >= gen_make_gen(8,0)) {
      /* On Broadwell and above, we have 48-bit addresses which consume two
       * dwords.  Some packets require that these get stored in a "canonical
       * form" which means that bit 47 is sign-extended through the upper
       * bits.  In order to correctly handle those aub dumps, we need to mask
       * off the top 16 bits.
       */
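      /* Worked example (hypothetical value): the canonical address
       * 0xffff800000001000 masks down to 0x0000800000001000 before the
       * BO lookup below.
       */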
      addr &= (~0ull >> 16);
   }

   struct gen_batch_decode_bo bo = ctx->get_bo(ctx->user_data, addr);

   if (gen_spec_get_gen(ctx->spec) >= gen_make_gen(8,0))
      bo.addr &= (~0ull >> 16);

   /* We may actually have an offset into the bo */
   if (bo.map != NULL) {
      assert(bo.addr <= addr);
      uint64_t offset = addr - bo.addr;
      bo.map += offset;
      bo.addr += offset;
      bo.size -= offset;
   }

   return bo;
}

static void
ctx_disassemble_program(struct gen_batch_decode_ctx *ctx,
                        uint32_t ksp, const char *type)
{
   if (!ctx->instruction_base.map)
      return;

   /* Print to ctx->fp rather than stdout so output stays in one stream. */
   fprintf(ctx->fp, "\nReferenced %s:\n", type);
   gen_disasm_disassemble(ctx->disasm,
                          (void *)ctx->instruction_base.map, ksp,
                          ctx->fp);
}

/* Heuristic to determine whether a uint32_t is probably actually a float
 * (http://stackoverflow.com/a/2953466)
 */
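/* Worked example: 0x3f800000 (1.0f) has unbiased exponent 0 and passes the
 * second test below, so it prints as a float; 0x00000001 fails all three
 * tests and prints as hex.
 */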

static bool
probably_float(uint32_t bits)
{
   int exp = ((bits & 0x7f800000U) >> 23) - 127;
   uint32_t mant = bits & 0x007fffff;

   /* +- 0.0 */
   if (exp == -127 && mant == 0)
      return true;

   /* +- 1 billionth to 1 billion */
   if (-30 <= exp && exp <= 30)
      return true;

   /* some value with only a few binary digits */
   if ((mant & 0x0000ffff) == 0)
      return true;

   return false;
}

static void
ctx_print_buffer(struct gen_batch_decode_ctx *ctx,
                 struct gen_batch_decode_bo bo,
                 uint32_t read_length,
                 uint32_t pitch)
{
   const uint32_t *dw_end = bo.map + MIN2(bo.size, read_length);

   unsigned line_count = 0;
   for (const uint32_t *dw = bo.map; dw < dw_end; dw++) {
      if (line_count * 4 == pitch || line_count == 8) {
         fprintf(ctx->fp, "\n");
         line_count = 0;
      }
      fprintf(ctx->fp, line_count == 0 ? "  " : " ");

      if ((ctx->flags & GEN_BATCH_DECODE_FLOATS) && probably_float(*dw))
         fprintf(ctx->fp, " %8.2f", *(float *) dw);
      else
         fprintf(ctx->fp, " 0x%08x", *dw);

      line_count++;
   }
   fprintf(ctx->fp, "\n");
}

static void
handle_state_base_address(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
{
   struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);

   struct gen_field_iterator iter;
   gen_field_iterator_init(&iter, inst, p, 0, false);

   while (gen_field_iterator_next(&iter)) {
      if (strcmp(iter.name, "Surface State Base Address") == 0) {
         ctx->surface_base = ctx_get_bo(ctx, iter.raw_value);
      } else if (strcmp(iter.name, "Dynamic State Base Address") == 0) {
         ctx->dynamic_base = ctx_get_bo(ctx, iter.raw_value);
      } else if (strcmp(iter.name, "Instruction Base Address") == 0) {
         ctx->instruction_base = ctx_get_bo(ctx, iter.raw_value);
      }
   }
}

static void
dump_binding_table(struct gen_batch_decode_ctx *ctx, uint32_t offset, int count)
{
   struct gen_group *strct =
      gen_spec_find_struct(ctx->spec, "RENDER_SURFACE_STATE");
   if (strct == NULL) {
      fprintf(ctx->fp, "did not find RENDER_SURFACE_STATE info\n");
      return;
   }

   /* If we don't know the actual count, guess. */
   if (count < 0)
      count = 8;

   if (ctx->surface_base.map == NULL) {
      fprintf(ctx->fp, "  binding table unavailable\n");
      return;
   }

   if (offset % 32 != 0 || offset >= UINT16_MAX ||
       offset >= ctx->surface_base.size) {
      fprintf(ctx->fp, "  invalid binding table pointer\n");
      return;
   }

   const uint32_t *pointers = ctx->surface_base.map + offset;
   for (int i = 0; i < count; i++) {
      if (pointers[i] == 0)
         continue;

      if (pointers[i] % 32 != 0 ||
          (pointers[i] + strct->dw_length * 4) >= ctx->surface_base.size) {
         fprintf(ctx->fp, "pointer %u: %08x <not valid>\n", i, pointers[i]);
         continue;
      }

      fprintf(ctx->fp, "pointer %u: %08x\n", i, pointers[i]);
      ctx_print_group(ctx, strct, ctx->surface_base.addr + pointers[i],
                      ctx->surface_base.map + pointers[i]);
   }
}

static void
dump_samplers(struct gen_batch_decode_ctx *ctx, uint32_t offset, int count)
{
   struct gen_group *strct = gen_spec_find_struct(ctx->spec, "SAMPLER_STATE");

   /* If we don't know the actual count, guess. */
   if (count < 0)
      count = 4;

   if (ctx->dynamic_base.map == NULL) {
      fprintf(ctx->fp, "  samplers unavailable\n");
      return;
   }

   if (offset % 32 != 0 || offset >= ctx->dynamic_base.size) {
      fprintf(ctx->fp, "  invalid sampler state pointer\n");
      return;
   }

   uint64_t state_addr = ctx->dynamic_base.addr + offset;
   const void *state_map = ctx->dynamic_base.map + offset;
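   /* Each SAMPLER_STATE entry is 4 dwords (16 bytes), hence the stride
    * used below.
    */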
   for (int i = 0; i < count; i++) {
      fprintf(ctx->fp, "sampler state %d\n", i);
      ctx_print_group(ctx, strct, state_addr, state_map);
      state_addr += 16;
      state_map += 16;
   }
}

static void
handle_media_interface_descriptor_load(struct gen_batch_decode_ctx *ctx,
                                       const uint32_t *p)
{
   if (ctx->dynamic_base.map == NULL)
      return;

   struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
   struct gen_group *desc =
      gen_spec_find_struct(ctx->spec, "INTERFACE_DESCRIPTOR_DATA");

   struct gen_field_iterator iter;
   gen_field_iterator_init(&iter, inst, p, 0, false);
   uint32_t descriptor_offset = 0;
   int descriptor_count = 0;
   while (gen_field_iterator_next(&iter)) {
      if (strcmp(iter.name, "Interface Descriptor Data Start Address") == 0) {
         descriptor_offset = strtol(iter.value, NULL, 16);
      } else if (strcmp(iter.name, "Interface Descriptor Total Length") == 0) {
         descriptor_count =
            strtol(iter.value, NULL, 16) / (desc->dw_length * 4);
      }
   }

   uint64_t desc_addr = ctx->dynamic_base.addr + descriptor_offset;
   const uint32_t *desc_map = ctx->dynamic_base.map + descriptor_offset;
   for (int i = 0; i < descriptor_count; i++) {
      fprintf(ctx->fp, "descriptor %d: %08x\n", i, descriptor_offset);

      ctx_print_group(ctx, desc, desc_addr, desc_map);

      gen_field_iterator_init(&iter, desc, desc_map, 0, false);
      /* Zero-initialize so a descriptor missing any of these fields can't
       * leave them undefined.
       */
      uint64_t ksp = 0;
      uint32_t sampler_offset = 0, sampler_count = 0;
      uint32_t binding_table_offset = 0, binding_entry_count = 0;
      while (gen_field_iterator_next(&iter)) {
         if (strcmp(iter.name, "Kernel Start Pointer") == 0) {
            ksp = strtoll(iter.value, NULL, 16);
         } else if (strcmp(iter.name, "Sampler State Pointer") == 0) {
            sampler_offset = strtol(iter.value, NULL, 16);
         } else if (strcmp(iter.name, "Sampler Count") == 0) {
            sampler_count = strtol(iter.value, NULL, 10);
         } else if (strcmp(iter.name, "Binding Table Pointer") == 0) {
            binding_table_offset = strtol(iter.value, NULL, 16);
         } else if (strcmp(iter.name, "Binding Table Entry Count") == 0) {
            binding_entry_count = strtol(iter.value, NULL, 10);
         }
      }

      ctx_disassemble_program(ctx, ksp, "compute shader");
      fprintf(ctx->fp, "\n");

      dump_samplers(ctx, sampler_offset, sampler_count);
      dump_binding_table(ctx, binding_table_offset, binding_entry_count);

      desc_map += desc->dw_length;
      desc_addr += desc->dw_length * 4;
   }
}

static void
handle_3dstate_vertex_buffers(struct gen_batch_decode_ctx *ctx,
                              const uint32_t *p)
{
   struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
   struct gen_group *vbs = gen_spec_find_struct(ctx->spec, "VERTEX_BUFFER_STATE");

   struct gen_batch_decode_bo vb = {};
   uint32_t vb_size = 0;
   int index = -1;
   int pitch = -1;
   bool ready = false;

   struct gen_field_iterator iter;
   gen_field_iterator_init(&iter, inst, p, 0, false);
   while (gen_field_iterator_next(&iter)) {
      if (iter.struct_desc != vbs)
         continue;

      struct gen_field_iterator vbs_iter;
      gen_field_iterator_init(&vbs_iter, vbs, &iter.p[iter.start_bit / 32], 0, false);
      while (gen_field_iterator_next(&vbs_iter)) {
         if (strcmp(vbs_iter.name, "Vertex Buffer Index") == 0) {
            index = vbs_iter.raw_value;
         } else if (strcmp(vbs_iter.name, "Buffer Pitch") == 0) {
            pitch = vbs_iter.raw_value;
         } else if (strcmp(vbs_iter.name, "Buffer Starting Address") == 0) {
            vb = ctx_get_bo(ctx, vbs_iter.raw_value);
         } else if (strcmp(vbs_iter.name, "Buffer Size") == 0) {
            vb_size = vbs_iter.raw_value;
            ready = true;
         } else if (strcmp(vbs_iter.name, "End Address") == 0) {
            if (vb.map && vbs_iter.raw_value >= vb.addr)
               vb_size = vbs_iter.raw_value - vb.addr;
            else
               vb_size = 0;
            ready = true;
         }

         if (!ready)
            continue;

         fprintf(ctx->fp, "vertex buffer %d, size %d\n", index, vb_size);

         if (vb.map == NULL) {
            fprintf(ctx->fp, "  buffer contents unavailable\n");
         } else if (vb_size > 0) {
            ctx_print_buffer(ctx, vb, vb_size, pitch);
         }

         /* Always reset the per-buffer state here; the old code skipped
          * this reset via continue when the buffer was unavailable or
          * empty, leaving ready set and re-printing stale state on the
          * next field.
          */
         vb.map = NULL;
         vb_size = 0;
         index = -1;
         pitch = -1;
         ready = false;
      }
   }
}

static void
handle_3dstate_index_buffer(struct gen_batch_decode_ctx *ctx,
                            const uint32_t *p)
{
   struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);

   struct gen_batch_decode_bo ib = {};
   uint32_t ib_size = 0;
   uint32_t format = 0;

   struct gen_field_iterator iter;
   gen_field_iterator_init(&iter, inst, p, 0, false);
   while (gen_field_iterator_next(&iter)) {
      if (strcmp(iter.name, "Index Format") == 0) {
         format = iter.raw_value;
      } else if (strcmp(iter.name, "Buffer Starting Address") == 0) {
         ib = ctx_get_bo(ctx, iter.raw_value);
      } else if (strcmp(iter.name, "Buffer Size") == 0) {
         ib_size = iter.raw_value;
      }
   }

   if (ib.map == NULL) {
      fprintf(ctx->fp, "  buffer contents unavailable\n");
      return;
   }

   const void *m = ib.map;
   const void *ib_end = ib.map + MIN2(ib.size, ib_size);
   for (int i = 0; m < ib_end && i < 10; i++) {
      switch (format) {
      case 0:
         fprintf(ctx->fp, "%3d ", *(uint8_t *)m);
         m += 1;
         break;
      case 1:
         fprintf(ctx->fp, "%3d ", *(uint16_t *)m);
         m += 2;
         break;
      case 2:
         fprintf(ctx->fp, "%3d ", *(uint32_t *)m);
         m += 4;
         break;
      }
   }

   if (m < ib_end)
      fprintf(ctx->fp, "...");
   fprintf(ctx->fp, "\n");
}

static void
decode_single_ksp(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
{
   struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);

   uint64_t ksp = 0;
   bool is_simd8 = false; /* vertex shaders on Gen8+ only */
   bool is_enabled = true;

   struct gen_field_iterator iter;
   gen_field_iterator_init(&iter, inst, p, 0, false);
   while (gen_field_iterator_next(&iter)) {
      if (strcmp(iter.name, "Kernel Start Pointer") == 0) {
         ksp = iter.raw_value;
      } else if (strcmp(iter.name, "SIMD8 Dispatch Enable") == 0) {
         is_simd8 = iter.raw_value;
      } else if (strcmp(iter.name, "Dispatch Mode") == 0) {
         is_simd8 = strcmp(iter.value, "SIMD8") == 0;
      } else if (strcmp(iter.name, "Dispatch Enable") == 0) {
         is_simd8 = strcmp(iter.value, "SIMD8") == 0;
      } else if (strcmp(iter.name, "Enable") == 0) {
         is_enabled = iter.raw_value;
      }
   }

   const char *type =
      strcmp(inst->name, "VS_STATE") == 0 ? "vertex shader" :
      strcmp(inst->name, "GS_STATE") == 0 ? "geometry shader" :
      strcmp(inst->name, "SF_STATE") == 0 ? "strips and fans shader" :
      strcmp(inst->name, "CLIP_STATE") == 0 ? "clip shader" :
      strcmp(inst->name, "3DSTATE_DS") == 0 ? "tessellation evaluation shader" :
      strcmp(inst->name, "3DSTATE_HS") == 0 ? "tessellation control shader" :
      strcmp(inst->name, "3DSTATE_VS") == 0 ? (is_simd8 ? "SIMD8 vertex shader" : "vec4 vertex shader") :
      strcmp(inst->name, "3DSTATE_GS") == 0 ? (is_simd8 ? "SIMD8 geometry shader" : "vec4 geometry shader") :
      NULL;

   if (is_enabled) {
      ctx_disassemble_program(ctx, ksp, type);
      fprintf(ctx->fp, "\n");
   }
}

static void
decode_ps_kernels(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
{
   struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);

   uint64_t ksp[3] = {0, 0, 0};
   bool enabled[3] = {false, false, false};

   struct gen_field_iterator iter;
   gen_field_iterator_init(&iter, inst, p, 0, false);
   while (gen_field_iterator_next(&iter)) {
      if (strncmp(iter.name, "Kernel Start Pointer ",
                  strlen("Kernel Start Pointer ")) == 0) {
         int idx = iter.name[strlen("Kernel Start Pointer ")] - '0';
         /* Use strtoll so a 64-bit pointer isn't truncated on 32-bit
          * builds, matching the other KSP parsing in this file.
          */
         ksp[idx] = strtoll(iter.value, NULL, 16);
      } else if (strcmp(iter.name, "8 Pixel Dispatch Enable") == 0) {
         enabled[0] = strcmp(iter.value, "true") == 0;
      } else if (strcmp(iter.name, "16 Pixel Dispatch Enable") == 0) {
         enabled[1] = strcmp(iter.value, "true") == 0;
      } else if (strcmp(iter.name, "32 Pixel Dispatch Enable") == 0) {
         enabled[2] = strcmp(iter.value, "true") == 0;
      }
   }

   /* Reorder KSPs to be [8, 16, 32] instead of the hardware order. */
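   /* In hardware order, KSP[0] serves whichever single mode is enabled;
    * when several modes are enabled at once, KSP[1] holds the SIMD32
    * kernel and KSP[2] the SIMD16 kernel, which is why the multi-mode
    * case below swaps them.
    */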
   if (enabled[0] + enabled[1] + enabled[2] == 1) {
      if (enabled[1]) {
         ksp[1] = ksp[0];
         ksp[0] = 0;
      } else if (enabled[2]) {
         ksp[2] = ksp[0];
         ksp[0] = 0;
      }
   } else {
      uint64_t tmp = ksp[1];
      ksp[1] = ksp[2];
      ksp[2] = tmp;
   }

   if (enabled[0])
      ctx_disassemble_program(ctx, ksp[0], "SIMD8 fragment shader");
   if (enabled[1])
      ctx_disassemble_program(ctx, ksp[1], "SIMD16 fragment shader");
   if (enabled[2])
      ctx_disassemble_program(ctx, ksp[2], "SIMD32 fragment shader");
   fprintf(ctx->fp, "\n");
}

static void
decode_3dstate_constant(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
{
   struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);

   /* Zero-initialize in case fewer than four fields are decoded. */
   uint32_t read_length[4] = { 0 };
   struct gen_batch_decode_bo buffer[4];
   memset(buffer, 0, sizeof(buffer));

   int rlidx = 0, bidx = 0;

   struct gen_field_iterator iter;
   gen_field_iterator_init(&iter, inst, p, 0, false);
   while (gen_field_iterator_next(&iter)) {
      if (strcmp(iter.name, "Read Length") == 0) {
         read_length[rlidx++] = iter.raw_value;
      } else if (strcmp(iter.name, "Buffer") == 0) {
         buffer[bidx++] = ctx_get_bo(ctx, iter.raw_value);
      }
   }

   for (int i = 0; i < 4; i++) {
      if (read_length[i] == 0 || buffer[i].map == NULL)
         continue;

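      /* Read lengths are in 256-bit (32-byte) units; scale to bytes. */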
      unsigned size = read_length[i] * 32;
      fprintf(ctx->fp, "constant buffer %d, size %u\n", i, size);

      ctx_print_buffer(ctx, buffer[i], size, 0);
   }
}

static void
decode_3dstate_binding_table_pointers(struct gen_batch_decode_ctx *ctx,
                                      const uint32_t *p)
{
   dump_binding_table(ctx, p[1], -1);
}

static void
decode_3dstate_sampler_state_pointers(struct gen_batch_decode_ctx *ctx,
                                      const uint32_t *p)
{
   dump_samplers(ctx, p[1], -1);
}

static void
decode_3dstate_sampler_state_pointers_gen6(struct gen_batch_decode_ctx *ctx,
                                           const uint32_t *p)
{
   dump_samplers(ctx, p[1], -1);
   dump_samplers(ctx, p[2], -1);
   dump_samplers(ctx, p[3], -1);
}

static bool
str_ends_with(const char *str, const char *end)
{
   int offset = strlen(str) - strlen(end);
   if (offset < 0)
      return false;

   return strcmp(str + offset, end) == 0;
}

static void
decode_dynamic_state_pointers(struct gen_batch_decode_ctx *ctx,
                              const char *struct_type, const uint32_t *p,
                              int count)
{
   if (ctx->dynamic_base.map == NULL) {
      fprintf(ctx->fp, "  dynamic %s state unavailable\n", struct_type);
      return;
   }

   struct gen_group *inst = gen_spec_find_instruction(ctx->spec, p);
   struct gen_group *state = gen_spec_find_struct(ctx->spec, struct_type);

   uint32_t state_offset = 0;

   struct gen_field_iterator iter;
   gen_field_iterator_init(&iter, inst, p, 0, false);
   while (gen_field_iterator_next(&iter)) {
      if (str_ends_with(iter.name, "Pointer")) {
         state_offset = iter.raw_value;
         break;
      }
   }

   /* state_addr must be 64-bit: dynamic_base.addr is a full GPU address. */
   uint64_t state_addr = ctx->dynamic_base.addr + state_offset;
   const uint32_t *state_map = ctx->dynamic_base.map + state_offset;
   for (int i = 0; i < count; i++) {
      fprintf(ctx->fp, "%s %d\n", struct_type, i);
      ctx_print_group(ctx, state, state_addr, state_map);

      state_addr += state->dw_length * 4;
      state_map += state->dw_length;
   }
}

static void
decode_3dstate_viewport_state_pointers_cc(struct gen_batch_decode_ctx *ctx,
                                          const uint32_t *p)
{
   decode_dynamic_state_pointers(ctx, "CC_VIEWPORT", p, 4);
}

static void
decode_3dstate_viewport_state_pointers_sf_clip(struct gen_batch_decode_ctx *ctx,
                                               const uint32_t *p)
{
   decode_dynamic_state_pointers(ctx, "SF_CLIP_VIEWPORT", p, 4);
}

static void
decode_3dstate_blend_state_pointers(struct gen_batch_decode_ctx *ctx,
                                    const uint32_t *p)
{
   decode_dynamic_state_pointers(ctx, "BLEND_STATE", p, 1);
}

static void
decode_3dstate_cc_state_pointers(struct gen_batch_decode_ctx *ctx,
                                 const uint32_t *p)
{
   decode_dynamic_state_pointers(ctx, "COLOR_CALC_STATE", p, 1);
}

static void
decode_3dstate_scissor_state_pointers(struct gen_batch_decode_ctx *ctx,
                                      const uint32_t *p)
{
   decode_dynamic_state_pointers(ctx, "SCISSOR_RECT", p, 1);
}

static void
decode_load_register_imm(struct gen_batch_decode_ctx *ctx, const uint32_t *p)
{
   struct gen_group *reg = gen_spec_find_register(ctx->spec, p[1]);

   if (reg != NULL) {
      fprintf(ctx->fp, "register %s (0x%x): 0x%x\n",
              reg->name, reg->register_offset, p[2]);
      ctx_print_group(ctx, reg, reg->register_offset, &p[2]);
   }
}

struct custom_decoder {
   const char *cmd_name;
   void (*decode)(struct gen_batch_decode_ctx *ctx, const uint32_t *p);
} custom_decoders[] = {
   { "STATE_BASE_ADDRESS", handle_state_base_address },
   { "MEDIA_INTERFACE_DESCRIPTOR_LOAD", handle_media_interface_descriptor_load },
   { "3DSTATE_VERTEX_BUFFERS", handle_3dstate_vertex_buffers },
   { "3DSTATE_INDEX_BUFFER", handle_3dstate_index_buffer },
   { "3DSTATE_VS", decode_single_ksp },
   { "3DSTATE_GS", decode_single_ksp },
   { "3DSTATE_DS", decode_single_ksp },
   { "3DSTATE_HS", decode_single_ksp },
   { "3DSTATE_PS", decode_ps_kernels },
   { "3DSTATE_CONSTANT_VS", decode_3dstate_constant },
   { "3DSTATE_CONSTANT_GS", decode_3dstate_constant },
   { "3DSTATE_CONSTANT_PS", decode_3dstate_constant },
   { "3DSTATE_CONSTANT_HS", decode_3dstate_constant },
   { "3DSTATE_CONSTANT_DS", decode_3dstate_constant },

   { "3DSTATE_BINDING_TABLE_POINTERS_VS", decode_3dstate_binding_table_pointers },
   { "3DSTATE_BINDING_TABLE_POINTERS_HS", decode_3dstate_binding_table_pointers },
   { "3DSTATE_BINDING_TABLE_POINTERS_DS", decode_3dstate_binding_table_pointers },
   { "3DSTATE_BINDING_TABLE_POINTERS_GS", decode_3dstate_binding_table_pointers },
   { "3DSTATE_BINDING_TABLE_POINTERS_PS", decode_3dstate_binding_table_pointers },

   { "3DSTATE_SAMPLER_STATE_POINTERS_VS", decode_3dstate_sampler_state_pointers },
   { "3DSTATE_SAMPLER_STATE_POINTERS_HS", decode_3dstate_sampler_state_pointers },
   { "3DSTATE_SAMPLER_STATE_POINTERS_DS", decode_3dstate_sampler_state_pointers },
   { "3DSTATE_SAMPLER_STATE_POINTERS_GS", decode_3dstate_sampler_state_pointers },
   { "3DSTATE_SAMPLER_STATE_POINTERS_PS", decode_3dstate_sampler_state_pointers },
   { "3DSTATE_SAMPLER_STATE_POINTERS", decode_3dstate_sampler_state_pointers_gen6 },

   { "3DSTATE_VIEWPORT_STATE_POINTERS_CC", decode_3dstate_viewport_state_pointers_cc },
   { "3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP", decode_3dstate_viewport_state_pointers_sf_clip },
   { "3DSTATE_BLEND_STATE_POINTERS", decode_3dstate_blend_state_pointers },
   { "3DSTATE_CC_STATE_POINTERS", decode_3dstate_cc_state_pointers },
   { "3DSTATE_SCISSOR_STATE_POINTERS", decode_3dstate_scissor_state_pointers },
   { "MI_LOAD_REGISTER_IMM", decode_load_register_imm }
};

static inline uint64_t
get_address(struct gen_spec *spec, const uint32_t *p)
{
   /* Addresses are always guaranteed to be page-aligned and sometimes
    * hardware packets have extra stuff stuffed in the bottom 12 bits.
    */
   uint64_t addr = p[0] & ~0xfffu;

   if (gen_spec_get_gen(spec) >= gen_make_gen(8,0)) {
      /* On Broadwell and above, we have 48-bit addresses which consume two
       * dwords.  Some packets require that these get stored in a "canonical
       * form" which means that bit 47 is sign-extended through the upper
       * bits.  In order to correctly handle those aub dumps, we need to mask
       * off the top 16 bits.
       */
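      /* Worked example (hypothetical dwords): p[0] = 0x00001007 and
       * p[1] = 0x00000001 decode to 0x0000000100001000; the low 12 bits
       * of p[0] carry packet flags, not address bits.
       */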
      addr |= ((uint64_t)p[1] & 0xffff) << 32;
   }

   return addr;
}

void
gen_print_batch(struct gen_batch_decode_ctx *ctx,
                const uint32_t *batch, uint32_t batch_size,
                uint64_t batch_addr)
{
   /* batch_size is in bytes (the recursive call below passes a BO size),
    * so convert to a dword count for the end pointer.
    */
   const uint32_t *p, *end = batch + batch_size / sizeof(uint32_t);
   int length;
   struct gen_group *inst;

   for (p = batch; p < end; p += length) {
      inst = gen_spec_find_instruction(ctx->spec, p);
      length = gen_group_get_length(inst, p);
      assert(inst == NULL || length > 0);
      length = MAX2(1, length);

      const char *reset_color = ctx->flags & GEN_BATCH_DECODE_IN_COLOR ? NORMAL : "";

      uint64_t offset;
      if (ctx->flags & GEN_BATCH_DECODE_OFFSETS)
         offset = batch_addr + ((char *)p - (char *)batch);
      else
         offset = 0;

      if (inst == NULL) {
         fprintf(ctx->fp, "%s0x%08"PRIx64": unknown instruction %08x%s\n",
                 (ctx->flags & GEN_BATCH_DECODE_IN_COLOR) ? RED_COLOR : "",
                 offset, p[0], reset_color);
         continue;
      }

      const char *color;
      const char *inst_name = gen_group_get_name(inst);
      if (ctx->flags & GEN_BATCH_DECODE_IN_COLOR) {
         reset_color = NORMAL;
         if (ctx->flags & GEN_BATCH_DECODE_FULL) {
            if (strcmp(inst_name, "MI_BATCH_BUFFER_START") == 0 ||
                strcmp(inst_name, "MI_BATCH_BUFFER_END") == 0)
               color = GREEN_HEADER;
            else
               color = BLUE_HEADER;
         } else {
            color = NORMAL;
         }
      } else {
         color = "";
         reset_color = "";
      }

      fprintf(ctx->fp, "%s0x%08"PRIx64": 0x%08x: %-80s%s\n",
              color, offset, p[0], inst_name, reset_color);

      if (ctx->flags & GEN_BATCH_DECODE_FULL) {
         ctx_print_group(ctx, inst, offset, p);

         for (int i = 0; i < ARRAY_LENGTH(custom_decoders); i++) {
            if (strcmp(inst_name, custom_decoders[i].cmd_name) == 0) {
               custom_decoders[i].decode(ctx, p);
               break;
            }
         }
      }

      if (strcmp(inst_name, "MI_BATCH_BUFFER_START") == 0) {
         /* Zero-initialize so a partial decode can't leave these undefined. */
         struct gen_batch_decode_bo next_batch = {};
         bool second_level = false;
         struct gen_field_iterator iter;
         gen_field_iterator_init(&iter, inst, p, 0, false);
         while (gen_field_iterator_next(&iter)) {
            if (strcmp(iter.name, "Batch Buffer Start Address") == 0) {
               next_batch = ctx_get_bo(ctx, iter.raw_value);
            } else if (strcmp(iter.name, "Second Level Batch Buffer") == 0) {
               second_level = iter.raw_value;
            }
         }

         if (next_batch.map == NULL) {
            fprintf(ctx->fp, "Secondary batch at 0x%08"PRIx64" unavailable\n",
                    next_batch.addr);
         }

         if (second_level) {
            /* MI_BATCH_BUFFER_START with "2nd Level Batch Buffer" set acts
             * like a subroutine call.  Commands that come afterwards get
             * processed once the 2nd level batch buffer returns with
             * MI_BATCH_BUFFER_END.
             */
            if (next_batch.map) {
               gen_print_batch(ctx, next_batch.map, next_batch.size,
                               next_batch.addr);
            }
         } else {
            /* MI_BATCH_BUFFER_START with "2nd Level Batch Buffer" unset acts
             * like a goto.  Nothing after it will ever get processed.  In
             * order to prevent the recursion from growing, we just reset the
             * loop and continue.
             */
            if (next_batch.map) {
               p = next_batch.map;
               end = next_batch.map + next_batch.size;
               length = 0;
               continue;
            } else {
               /* Nothing we can do */
               break;
            }
         }
      } else if (strcmp(inst_name, "MI_BATCH_BUFFER_END") == 0) {
         break;
      }
   }
}