intel/compiler: Get rid of the global compaction table pointers
[mesa.git] / src / intel / tools / aub_write.c
1 /*
2 * Copyright © 2015 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include "aub_write.h"
25
26 #include <inttypes.h>
27 #include <signal.h>
28 #include <stdarg.h>
29 #include <stdlib.h>
30 #include <string.h>
31
32 #include "drm-uapi/i915_drm.h"
33 #include "intel_aub.h"
34 #include "gen_context.h"
35
#ifndef ALIGN
/* Round x up to the next multiple of y; y must be a power of two. */
#define ALIGN(x, y) (((x) + (y)-1) & ~((y)-1))
#endif

/* Bit 8 of MI_BATCH_BUFFER_START (gen4+ "I965" layout) — selects the
 * non-secure batch execution mode. */
#define MI_BATCH_NON_SECURE_I965 (1 << 8)
41
/* Type-generic minimum; GNU statement-expression form so each argument is
 * evaluated exactly once. */
#define min(a, b) ({                      \
      __typeof(a) _min_a = (a);           \
      __typeof(b) _min_b = (b);           \
      _min_a < _min_b ? _min_a : _min_b;  \
   })

/* Type-generic maximum; same single-evaluation guarantee as min(). */
#define max(a, b) ({                      \
      __typeof(a) _max_a = (a);           \
      __typeof(b) _max_b = (b);           \
      _max_a > _max_b ? _max_a : _max_b;  \
   })
53
54 static struct aub_context *aub_context_new(struct aub_file *aub, uint32_t new_id);
55 static void mem_trace_memory_write_header_out(struct aub_file *aub, uint64_t addr,
56 uint32_t len, uint32_t addr_space,
57 const char *desc);
58
/* printf-style fatal check: when cond is non-zero, print the formatted
 * message on stderr and stop in the debugger by raising SIGTRAP.
 * Does nothing when cond is zero.
 */
static void __attribute__ ((format(__printf__, 2, 3)))
fail_if(int cond, const char *format, ...)
{
   if (!cond)
      return;

   va_list ap;
   va_start(ap, format);
   vfprintf(stderr, format, ap);
   va_end(ap);

   raise(SIGTRAP);
}
73
/* Round v up to the next multiple of a (a must be a power of two). */
static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   const uint32_t mask = a - 1;
   return (v + mask) & ~mask;
}
79
/* Recursively free a PPGTT translation table.
 *
 * Level-1 "subtables" entries are not heap pointers: populate_ppgtt_table()
 * stores raw physical page addresses in them, so the recursion stops before
 * level 1 and never frees (or dereferences) those values.
 */
static void
aub_ppgtt_table_finish(struct aub_ppgtt_table *table, int level)
{
   if (level == 1)
      return;

   for (unsigned i = 0; i < ARRAY_SIZE(table->subtables); i++) {
      if (table->subtables[i]) {
         aub_ppgtt_table_finish(table->subtables[i], level - 1);
         free(table->subtables[i]);
      }
   }
}
93
94 static void
95 data_out(struct aub_file *aub, const void *data, size_t size)
96 {
97 if (size == 0)
98 return;
99
100 fail_if(fwrite(data, 1, size, aub->file) == 0,
101 "Writing to output failed\n");
102 }
103
/* Emit a single 32-bit word (host byte order) into the AUB stream. */
static void
dword_out(struct aub_file *aub, uint32_t data)
{
   data_out(aub, &data, sizeof data);
}
109
110 static void
111 write_execlists_header(struct aub_file *aub, const char *name)
112 {
113 char app_name[8 * 4];
114 int app_name_len, dwords;
115
116 app_name_len =
117 snprintf(app_name, sizeof(app_name), "PCI-ID=0x%X %s",
118 aub->pci_id, name);
119 app_name_len = ALIGN(app_name_len, sizeof(uint32_t));
120
121 dwords = 5 + app_name_len / sizeof(uint32_t);
122 dword_out(aub, CMD_MEM_TRACE_VERSION | (dwords - 1));
123 dword_out(aub, AUB_MEM_TRACE_VERSION_FILE_VERSION);
124 dword_out(aub, aub->devinfo.simulator_id << AUB_MEM_TRACE_VERSION_DEVICE_SHIFT);
125 dword_out(aub, 0); /* version */
126 dword_out(aub, 0); /* version */
127 data_out(aub, app_name, app_name_len);
128 }
129
/* Emit the legacy (pre-execlist) AUB file header: a version packet, a
 * 32-byte NUL-terminated application name, two timestamp dwords and a
 * dword-padded "PCI-ID=..." comment string.
 */
static void
write_legacy_header(struct aub_file *aub, const char *name)
{
   char app_name[8 * 4];
   char comment[16];
   int comment_len, comment_dwords, dwords;

   comment_len = snprintf(comment, sizeof(comment), "PCI-ID=0x%x", aub->pci_id);
   comment_dwords = ((comment_len + 3) / 4);

   /* Start with a (required) version packet. */
   dwords = 13 + comment_dwords;
   dword_out(aub, CMD_AUB_HEADER | (dwords - 2));
   dword_out(aub, (4 << AUB_HEADER_MAJOR_SHIFT) |
                  (0 << AUB_HEADER_MINOR_SHIFT));

   /* Next comes a 32-byte application name, truncated and always
    * NUL-terminated. */
   strncpy(app_name, name, sizeof(app_name));
   app_name[sizeof(app_name) - 1] = 0;
   data_out(aub, app_name, sizeof(app_name));

   dword_out(aub, 0); /* timestamp */
   dword_out(aub, 0); /* timestamp */
   dword_out(aub, comment_len);
   /* comment_dwords * 4 may exceed comment_len by up to 3 pad bytes; both
    * stay within the 16-byte buffer. */
   data_out(aub, comment, comment_dwords * 4);
}
156
157
/* Dispatch to the header format matching this trace's submission mode. */
static void
aub_write_header(struct aub_file *aub, const char *app_name)
{
   if (!aub_use_execlists(aub)) {
      write_legacy_header(aub, app_name);
      return;
   }
   write_execlists_header(aub, app_name);
}
166
/* Initialize an aub_file: identify the device from pci_id (fatal on unknown
 * IDs), write the file header, seed the physical/GGTT page allocators,
 * reserve physical page 0 for the PPGTT PML4, write GGTT entry 0, and
 * create the default context (id 0).
 */
void
aub_file_init(struct aub_file *aub, FILE *file, FILE *debug, uint16_t pci_id, const char *app_name)
{
   memset(aub, 0, sizeof(*aub));

   aub->verbose_log_file = debug;
   aub->file = file;
   aub->pci_id = pci_id;
   fail_if(!gen_get_device_info_from_pci_id(pci_id, &aub->devinfo),
           "failed to identify chipset=0x%x\n", pci_id);
   /* Gen8+ uses 48-bit addressing (64-bit PTEs); earlier gens 32-bit. */
   aub->addr_bits = aub->devinfo.gen >= 8 ? 48 : 32;

   aub_write_header(aub, app_name);

   /* Redundant after the memset above; kept for explicitness. */
   aub->phys_addrs_allocator = 0;
   aub->ggtt_addrs_allocator = 0;
   /* Physical page 0 backs the top-level (PML4) PPGTT table. */
   aub->pml4.phys_addr = aub->phys_addrs_allocator++ << 12;

   /* Write GGTT entry 0 with value 1 (low bit presumably the "present"
    * bit — addr/space semantics per the GGTT_ENTRY address space). */
   mem_trace_memory_write_header_out(aub, aub->ggtt_addrs_allocator++,
                                     GEN8_PTE_SIZE,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT_ENTRY,
                                     "GGTT PT");
   dword_out(aub, 1);
   dword_out(aub, 0);

   aub->next_context_handle = 1;
   aub_context_new(aub, 0); /* Default context */
}
195
/* Tear down an aub_file: free the 4-level PPGTT tables and close the
 * output stream (fclose also flushes any buffered packets). */
void
aub_file_finish(struct aub_file *aub)
{
   aub_ppgtt_table_finish(&aub->pml4, 4);
   fclose(aub->file);
}
202
203 uint32_t
204 aub_gtt_size(struct aub_file *aub)
205 {
206 return NUM_PT_ENTRIES * (aub->addr_bits > 32 ? GEN8_PTE_SIZE : PTE_SIZE);
207 }
208
/* Emit the 5-dword header of a CMD_MEM_TRACE_MEMORY_WRITE packet.
 *
 * The caller must follow this immediately with `len` bytes of payload,
 * padded to a dword multiple (the packet length field accounts for
 * ALIGN(len, 4) / 4 payload dwords).
 */
static void
mem_trace_memory_write_header_out(struct aub_file *aub, uint64_t addr,
                                  uint32_t len, uint32_t addr_space,
                                  const char *desc)
{
   uint32_t dwords = ALIGN(len, sizeof(uint32_t)) / sizeof(uint32_t);

   if (aub->verbose_log_file) {
      fprintf(aub->verbose_log_file,
              " MEM WRITE (0x%016" PRIx64 "-0x%016" PRIx64 ") %s\n",
              addr, addr + len, desc);
   }

   dword_out(aub, CMD_MEM_TRACE_MEMORY_WRITE | (5 + dwords - 1));
   dword_out(aub, addr & 0xFFFFFFFF);   /* addr lo */
   dword_out(aub, addr >> 32);          /* addr hi */
   dword_out(aub, addr_space);          /* gtt */
   dword_out(aub, len);
}
228
/* Emit a CMD_MEM_TRACE_REGISTER_WRITE packet: a masked 32-bit MMIO register
 * write (full low mask, zero high mask, value last). */
static void
register_write_out(struct aub_file *aub, uint32_t addr, uint32_t value)
{
   uint32_t dwords = 1;

   if (aub->verbose_log_file) {
      fprintf(aub->verbose_log_file,
              " MMIO WRITE (0x%08x = 0x%08x)\n", addr, value);
   }

   dword_out(aub, CMD_MEM_TRACE_REGISTER_WRITE | (5 + dwords - 1));
   dword_out(aub, addr);
   dword_out(aub, AUB_MEM_TRACE_REGISTER_SIZE_DWORD |
                  AUB_MEM_TRACE_REGISTER_SPACE_MMIO);
   dword_out(aub, 0xFFFFFFFF); /* mask lo */
   dword_out(aub, 0x00000000); /* mask hi */
   dword_out(aub, value);
}
247
/* Ensure entries [start, end] of one PPGTT table are populated, allocating
 * missing children, then emit a physical-memory write covering exactly the
 * range of entries that changed.
 *
 * Two cases per missing entry:
 *  - level == 1: the "subtable" slot stores a raw physical page address
 *    (allocator index << 12) cast to a pointer — it is NOT a heap pointer
 *    (see aub_ppgtt_table_finish, which never frees level-1 entries).
 *  - level  > 1: a real aub_ppgtt_table is calloc'ed and given its own
 *    physical page for the child table.
 * Each entry value is the child's physical address OR'ed with 3
 * (read/write | present).
 */
static void
populate_ppgtt_table(struct aub_file *aub, struct aub_ppgtt_table *table,
                     int start, int end, int level)
{
   uint64_t entries[512] = {0};
   /* Track the span of newly created entries; start > end means "none". */
   int dirty_start = 512, dirty_end = 0;

   if (aub->verbose_log_file) {
      fprintf(aub->verbose_log_file,
              " PPGTT (0x%016" PRIx64 "), lvl %d, start: %x, end: %x\n",
              table->phys_addr, level, start, end);
   }

   for (int i = start; i <= end; i++) {
      if (!table->subtables[i]) {
         dirty_start = min(dirty_start, i);
         dirty_end = max(dirty_end, i);
         if (level == 1) {
            table->subtables[i] =
               (void *)(aub->phys_addrs_allocator++ << 12);
            if (aub->verbose_log_file) {
               fprintf(aub->verbose_log_file,
                       " Adding entry: %x, phys_addr: 0x%016" PRIx64 "\n",
                       i, (uint64_t)table->subtables[i]);
            }
         } else {
            table->subtables[i] =
               calloc(1, sizeof(struct aub_ppgtt_table));
            table->subtables[i]->phys_addr =
               aub->phys_addrs_allocator++ << 12;
            if (aub->verbose_log_file) {
               fprintf(aub->verbose_log_file,
                       " Adding entry: %x, phys_addr: 0x%016" PRIx64 "\n",
                       i, table->subtables[i]->phys_addr);
            }
         }
      }
      entries[i] = 3 /* read/write | present */ |
                   (level == 1 ? (uint64_t)table->subtables[i] :
                                 table->subtables[i]->phys_addr);
   }

   /* Flush only the dirtied sub-range of this table to physical memory. */
   if (dirty_start <= dirty_end) {
      uint64_t write_addr = table->phys_addr + dirty_start *
                            sizeof(uint64_t);
      uint64_t write_size = (dirty_end - dirty_start + 1) *
                            sizeof(uint64_t);
      mem_trace_memory_write_header_out(aub, write_addr, write_size,
                                        AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL,
                                        "PPGTT update");
      data_out(aub, entries + dirty_start, write_size);
   }
}
301
/* Ensure 4-level PPGTT mappings exist for [start, start + size).
 *
 * For each level, the inclusive start/end addresses of the sub-range
 * covered by the parent entry are computed (end has all lower address bits
 * set, which is why the loops use `<` against it), and
 * populate_ppgtt_table() fills in missing entries for that level.
 * Assumes a 48-bit virtual address space (9 bits per level, 4k pages).
 */
void
aub_map_ppgtt(struct aub_file *aub, uint64_t start, uint64_t size)
{
   uint64_t l4_start = start & 0xff8000000000;
   uint64_t l4_end = ((start + size - 1) | 0x007fffffffff) & 0xffffffffffff;

   /* 9-bit table index of an address at each level. */
#define L4_index(addr) (((addr) >> 39) & 0x1ff)
#define L3_index(addr) (((addr) >> 30) & 0x1ff)
#define L2_index(addr) (((addr) >> 21) & 0x1ff)
#define L1_index(addr) (((addr) >> 12) & 0x1ff)

   /* Child-table accessors; valid only after the parent level has been
    * populated. */
#define L3_table(addr) (aub->pml4.subtables[L4_index(addr)])
#define L2_table(addr) (L3_table(addr)->subtables[L3_index(addr)])
#define L1_table(addr) (L2_table(addr)->subtables[L2_index(addr)])

   if (aub->verbose_log_file) {
      fprintf(aub->verbose_log_file,
              " Mapping PPGTT address: 0x%" PRIx64 ", size: %" PRIu64"\n",
              start, size);
   }

   populate_ppgtt_table(aub, &aub->pml4, L4_index(l4_start), L4_index(l4_end), 4);

   for (uint64_t l4 = l4_start; l4 < l4_end; l4 += (1ULL << 39)) {
      /* Clamp this L4 entry's range to the requested [start, start+size). */
      uint64_t l3_start = max(l4, start & 0xffffc0000000);
      uint64_t l3_end = min(l4 + (1ULL << 39) - 1,
                            ((start + size - 1) | 0x00003fffffff) & 0xffffffffffff);
      uint64_t l3_start_idx = L3_index(l3_start);
      uint64_t l3_end_idx = L3_index(l3_end);

      populate_ppgtt_table(aub, L3_table(l4), l3_start_idx, l3_end_idx, 3);

      for (uint64_t l3 = l3_start; l3 < l3_end; l3 += (1ULL << 30)) {
         uint64_t l2_start = max(l3, start & 0xffffffe00000);
         uint64_t l2_end = min(l3 + (1ULL << 30) - 1,
                               ((start + size - 1) | 0x0000001fffff) & 0xffffffffffff);
         uint64_t l2_start_idx = L2_index(l2_start);
         uint64_t l2_end_idx = L2_index(l2_end);

         populate_ppgtt_table(aub, L2_table(l3), l2_start_idx, l2_end_idx, 2);

         for (uint64_t l2 = l2_start; l2 < l2_end; l2 += (1ULL << 21)) {
            uint64_t l1_start = max(l2, start & 0xfffffffff000);
            uint64_t l1_end = min(l2 + (1ULL << 21) - 1,
                                  ((start + size - 1) | 0x000000000fff) & 0xffffffffffff);
            uint64_t l1_start_idx = L1_index(l1_start);
            uint64_t l1_end_idx = L1_index(l1_end);

            populate_ppgtt_table(aub, L1_table(l2), l1_start_idx, l1_end_idx, 1);
         }
      }
   }
}
355
/* Translate a PPGTT virtual address to the physical page address backing it
 * using the L*_table/L*_index macros defined in aub_map_ppgtt above.
 * The address must already be mapped.
 *
 * NOTE(review): the low 12 bits (page offset) of ppgtt_addr are discarded —
 * callers such as aub_write_trace_block appear to rely on page-granular
 * writes; confirm before using with unaligned addresses.
 */
static uint64_t
ppgtt_lookup(struct aub_file *aub, uint64_t ppgtt_addr)
{
   return (uint64_t)L1_table(ppgtt_addr)->subtables[L1_index(ppgtt_addr)];
}
361
/* Per-engine description, indexed by DRM engine class: a display name, the
 * hardware class ID used in execlist context descriptors, and the MMIO
 * offsets of the execlist submit port (ELSP), submit queue (ELSQ), status
 * and control registers for that command streamer.
 */
static const struct engine {
   const char *name;
   enum drm_i915_gem_engine_class engine_class;
   uint32_t hw_class;
   uint32_t elsp_reg;
   uint32_t elsq_reg;
   uint32_t status_reg;
   uint32_t control_reg;
} engines[] = {
   [I915_ENGINE_CLASS_RENDER] = {
      .name = "RENDER",
      .engine_class = I915_ENGINE_CLASS_RENDER,
      .hw_class = 1,
      .elsp_reg = EXECLIST_SUBMITPORT_RCSUNIT,
      .elsq_reg = EXECLIST_SQ_CONTENTS0_RCSUNIT,
      .status_reg = EXECLIST_STATUS_RCSUNIT,
      .control_reg = EXECLIST_CONTROL_RCSUNIT,
   },
   [I915_ENGINE_CLASS_VIDEO] = {
      .name = "VIDEO",
      .engine_class = I915_ENGINE_CLASS_VIDEO,
      .hw_class = 3,
      .elsp_reg = EXECLIST_SUBMITPORT_VCSUNIT0,
      .elsq_reg = EXECLIST_SQ_CONTENTS0_VCSUNIT0,
      .status_reg = EXECLIST_STATUS_VCSUNIT0,
      .control_reg = EXECLIST_CONTROL_VCSUNIT0,
   },
   [I915_ENGINE_CLASS_COPY] = {
      .name = "BLITTER",
      .engine_class = I915_ENGINE_CLASS_COPY,
      .hw_class = 2,
      .elsp_reg = EXECLIST_SUBMITPORT_BCSUNIT,
      .elsq_reg = EXECLIST_SQ_CONTENTS0_BCSUNIT,
      .status_reg = EXECLIST_STATUS_BCSUNIT,
      .control_reg = EXECLIST_CONTROL_BCSUNIT,
   },
};
399
400 static void
401 aub_map_ggtt(struct aub_file *aub, uint64_t virt_addr, uint64_t size)
402 {
403 /* Makes the code below a bit simpler. In practice all of the write we
404 * receive from error2aub are page aligned.
405 */
406 assert(virt_addr % 4096 == 0);
407 assert((aub->phys_addrs_allocator + size) < (1UL << 32));
408
409 /* GGTT PT */
410 uint32_t ggtt_ptes = DIV_ROUND_UP(size, 4096);
411 uint64_t phys_addr = aub->phys_addrs_allocator << 12;
412 aub->phys_addrs_allocator += ggtt_ptes;
413
414 if (aub->verbose_log_file) {
415 fprintf(aub->verbose_log_file,
416 " Mapping GGTT address: 0x%" PRIx64 ", size: %" PRIu64" phys_addr=0x%" PRIx64 " entries=%u\n",
417 virt_addr, size, phys_addr, ggtt_ptes);
418 }
419
420 mem_trace_memory_write_header_out(aub,
421 (virt_addr >> 12) * GEN8_PTE_SIZE,
422 ggtt_ptes * GEN8_PTE_SIZE,
423 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT_ENTRY,
424 "GGTT PT");
425 for (uint32_t i = 0; i < ggtt_ptes; i++) {
426 dword_out(aub, 1 + phys_addr + i * 4096);
427 dword_out(aub, 0);
428 }
429 }
430
/* Map [virt_addr, virt_addr + size) in the GGTT and write `data` into it,
 * one 4k page per memory-write packet, padded to dword multiples.
 */
void
aub_write_ggtt(struct aub_file *aub, uint64_t virt_addr, uint64_t size, const void *data)
{
   /* Default setup assumes a 1 to 1 mapping between physical and virtual GGTT
    * addresses. This is somewhat incompatible with the aub_write_ggtt()
    * function. In practice it doesn't matter as the GGTT writes are used to
    * replace the default setup and we've taken care to setup the PML4 as the
    * top of the GGTT.
    */
   assert(!aub->has_default_setup);

   aub_map_ggtt(aub, virt_addr, size);

   /* We write the GGTT buffer through the GGTT aub command rather than the
    * PHYSICAL aub command. This is because the Gen9 simulator seems to have 2
    * different set of memory pools for GGTT and physical (probably someone
    * didn't really understand the concept?).
    */
   static const char null_block[8 * 4096];
   for (uint64_t offset = 0; offset < size; offset += 4096) {
      uint32_t block_size = min(4096, size - offset);

      mem_trace_memory_write_header_out(aub, virt_addr + offset, block_size,
                                        AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT,
                                        "GGTT buffer");
      data_out(aub, (char *) data + offset, block_size);

      /* Pad to a multiple of 4 bytes. */
      data_out(aub, null_block, -block_size & 3);
   }
}
462
463 static const struct engine *
464 engine_from_engine_class(enum drm_i915_gem_engine_class engine_class)
465 {
466 switch (engine_class) {
467 case I915_ENGINE_CLASS_RENDER:
468 case I915_ENGINE_CLASS_COPY:
469 case I915_ENGINE_CLASS_VIDEO:
470 return &engines[engine_class];
471 default:
472 unreachable("unknown ring");
473 }
474 }
475
/* Fill `data`/`size` with the default HW context image for an engine.
 *
 * Called once with params/data == NULL to query just the size, then again
 * to generate the image (see write_engine_execlist_setup).
 *
 * NOTE(review): gen10 selects the gen8_contexts table here and the
 * gen10_contexts table is only reached for gen11+ — the naming suggests
 * otherwise; confirm against gen_context.h which gens each init covers.
 */
static void
get_context_init(const struct gen_device_info *devinfo,
                 const struct gen_context_parameters *params,
                 enum drm_i915_gem_engine_class engine_class,
                 uint32_t *data,
                 uint32_t *size)
{
   static const gen_context_init_t gen8_contexts[] = {
      [I915_ENGINE_CLASS_RENDER] = gen8_render_context_init,
      [I915_ENGINE_CLASS_COPY] = gen8_blitter_context_init,
      [I915_ENGINE_CLASS_VIDEO] = gen8_video_context_init,
   };
   static const gen_context_init_t gen10_contexts[] = {
      [I915_ENGINE_CLASS_RENDER] = gen10_render_context_init,
      [I915_ENGINE_CLASS_COPY] = gen10_blitter_context_init,
      [I915_ENGINE_CLASS_VIDEO] = gen10_video_context_init,
   };

   assert(devinfo->gen >= 8);

   if (devinfo->gen <= 10)
      gen8_contexts[engine_class](params, data, size);
   else
      gen10_contexts[engine_class](params, data, size);
}
501
502 static uint64_t
503 alloc_ggtt_address(struct aub_file *aub, uint64_t size)
504 {
505 uint32_t ggtt_ptes = DIV_ROUND_UP(size, 4096);
506 uint64_t addr = aub->ggtt_addrs_allocator << 12;
507
508 aub->ggtt_addrs_allocator += ggtt_ptes;
509 aub_map_ggtt(aub, addr, size);
510
511 return addr;
512 }
513
514 static void
515 write_hwsp(struct aub_file *aub,
516 enum drm_i915_gem_engine_class engine_class)
517 {
518 uint32_t reg = 0;
519 switch (engine_class) {
520 case I915_ENGINE_CLASS_RENDER: reg = HWS_PGA_RCSUNIT; break;
521 case I915_ENGINE_CLASS_COPY: reg = HWS_PGA_BCSUNIT; break;
522 case I915_ENGINE_CLASS_VIDEO: reg = HWS_PGA_VCSUNIT0; break;
523 default:
524 unreachable("unknown ring");
525 }
526
527 register_write_out(aub, reg, aub->engine_setup[engine_class].hwsp_addr);
528 }
529
/* Allocate and emit the GGTT-resident state for one HW context on one
 * engine: a zeroed ring buffer, a zeroed per-process HW status page
 * (PPHWSP) and the engine's default context image, laid out contiguously.
 * Records the ring/PPHWSP GGTT addresses in hw_ctx and marks it
 * initialized. Returns the total GGTT size consumed.
 */
static uint32_t
write_engine_execlist_setup(struct aub_file *aub,
                            uint32_t ctx_id,
                            struct aub_hw_context *hw_ctx,
                            enum drm_i915_gem_engine_class engine_class)
{
   const struct engine *cs = engine_from_engine_class(engine_class);
   uint32_t context_size;

   /* First call queries only the context image size (NULL params/data). */
   get_context_init(&aub->devinfo, NULL, engine_class, NULL, &context_size);

   /* GGTT PT */
   uint32_t total_size = RING_SIZE + PPHWSP_SIZE + context_size;
   char name[80];
   uint64_t ggtt_addr = alloc_ggtt_address(aub, total_size);

   /* NOTE(review): this name is never emitted — it is overwritten by the
    * "RING" snprintf below before any use. */
   snprintf(name, sizeof(name), "%s (ctx id: %d) GGTT PT", cs->name, ctx_id);

   /* RING */
   hw_ctx->ring_addr = ggtt_addr;
   snprintf(name, sizeof(name), "%s RING", cs->name);
   mem_trace_memory_write_header_out(aub, ggtt_addr, RING_SIZE,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT,
                                     name);
   for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
      dword_out(aub, 0);
   ggtt_addr += RING_SIZE;

   /* PPHWSP: one packet covers the status page plus the context image that
    * immediately follows it. */
   hw_ctx->pphwsp_addr = ggtt_addr;
   snprintf(name, sizeof(name), "%s PPHWSP", cs->name);
   mem_trace_memory_write_header_out(aub, ggtt_addr,
                                     PPHWSP_SIZE + context_size,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT,
                                     name);
   for (uint32_t i = 0; i < PPHWSP_SIZE; i += sizeof(uint32_t))
      dword_out(aub, 0);

   /* CONTEXT: second get_context_init call generates the actual image,
    * pointing it at this context's ring and the shared PML4. */
   struct gen_context_parameters params = {
      .ring_addr = hw_ctx->ring_addr,
      .ring_size = RING_SIZE,
      .pml4_addr = aub->pml4.phys_addr,
   };
   uint32_t *context_data = calloc(1, context_size);
   get_context_init(&aub->devinfo, &params, engine_class, context_data, &context_size);
   data_out(aub, context_data, context_size);
   free(context_data);

   hw_ctx->initialized = true;

   return total_size;
}
583
584 static void
585 write_execlists_default_setup(struct aub_file *aub)
586 {
587 register_write_out(aub, GFX_MODE_RCSUNIT, 0x80008000 /* execlist enable */);
588 register_write_out(aub, GFX_MODE_VCSUNIT0, 0x80008000 /* execlist enable */);
589 register_write_out(aub, GFX_MODE_BCSUNIT, 0x80008000 /* execlist enable */);
590 }
591
/* Emit the legacy default GTT: a single trace block writing identity-style
 * PTEs (0x1000 apart, flags 0x200003) for every entry. Entries are 1 or 2
 * dwords wide depending on the address width. */
static void write_legacy_default_setup(struct aub_file *aub)
{
   uint32_t entry = 0x200003;

   /* Set up the GTT. The max we can handle is 64M */
   dword_out(aub, CMD_AUB_TRACE_HEADER_BLOCK |
                  ((aub->addr_bits > 32 ? 6 : 5) - 2));
   dword_out(aub, AUB_TRACE_MEMTYPE_GTT_ENTRY |
                  AUB_TRACE_TYPE_NOTYPE | AUB_TRACE_OP_DATA_WRITE);
   dword_out(aub, 0); /* subtype */
   dword_out(aub, 0); /* offset */
   dword_out(aub, aub_gtt_size(aub)); /* size */
   if (aub->addr_bits > 32)
      dword_out(aub, 0);
   for (uint32_t i = 0; i < NUM_PT_ENTRIES; i++) {
      dword_out(aub, entry + 0x1000 * i);
      if (aub->addr_bits > 32)
         dword_out(aub, 0);
   }
}
612
613 /**
614 * Sets up a default GGTT/PPGTT address space and execlists context (when
615 * supported).
616 */
617 void
618 aub_write_default_setup(struct aub_file *aub)
619 {
620 if (aub_use_execlists(aub))
621 write_execlists_default_setup(aub);
622 else
623 write_legacy_default_setup(aub);
624
625 aub->has_default_setup = true;
626 }
627
628 static struct aub_context *
629 aub_context_new(struct aub_file *aub, uint32_t new_id)
630 {
631 assert(aub->num_contexts < MAX_CONTEXT_COUNT);
632
633 struct aub_context *ctx = &aub->contexts[aub->num_contexts++];
634 memset(ctx, 0, sizeof(*ctx));
635 ctx->id = new_id;
636
637 return ctx;
638 }
639
640 uint32_t
641 aub_write_context_create(struct aub_file *aub, uint32_t *ctx_id)
642 {
643 uint32_t new_id = ctx_id ? *ctx_id : aub->next_context_handle;
644
645 aub_context_new(aub, new_id);
646
647 if (!ctx_id)
648 aub->next_context_handle++;
649
650 return new_id;
651 }
652
653 static struct aub_context *
654 aub_context_find(struct aub_file *aub, uint32_t id)
655 {
656 for (int i = 0; i < aub->num_contexts; i++) {
657 if (aub->contexts[i].id == id)
658 return &aub->contexts[i];
659 }
660
661 return NULL;
662 }
663
/* Return the per-engine HW context of a (previously created) context,
 * lazily emitting its ring/PPHWSP/context-image setup on first use.
 * Asserts that ctx_id refers to an existing context. */
static struct aub_hw_context *
aub_write_ensure_context(struct aub_file *aub, uint32_t ctx_id,
                         enum drm_i915_gem_engine_class engine_class)
{
   struct aub_context *ctx = aub_context_find(aub, ctx_id);
   assert(ctx != NULL);

   struct aub_hw_context *hw_ctx = &ctx->hw_contexts[engine_class];
   if (!hw_ctx->initialized)
      write_engine_execlist_setup(aub, ctx->id, hw_ctx, engine_class);

   return hw_ctx;
}
677
678 static uint64_t
679 get_context_descriptor(struct aub_file *aub,
680 const struct engine *cs,
681 struct aub_hw_context *hw_ctx)
682 {
683 return cs->hw_class | hw_ctx->pphwsp_addr | CONTEXT_FLAGS;
684 }
685
/**
 * Break up large objects into multiple writes. Otherwise a 128kb VBO
 * would overflow the 16 bits of size field in the packet header and
 * everything goes badly after that.
 *
 * In execlist mode each packet covers at most one 4k page, addressed by its
 * physical page resolved through the PPGTT. In legacy mode the data is
 * written through the GTT at gtt_offset in up to 32k chunks. A NULL
 * `virtual` writes zeros (clears the range).
 */
void
aub_write_trace_block(struct aub_file *aub,
                      uint32_t type, void *virtual,
                      uint32_t size, uint64_t gtt_offset)
{
   uint32_t block_size;
   uint32_t subtype = 0;
   static const char null_block[8 * 4096];

   for (uint32_t offset = 0; offset < size; offset += block_size) {
      block_size = min(8 * 4096, size - offset);

      if (aub_use_execlists(aub)) {
         block_size = min(4096, block_size);
         mem_trace_memory_write_header_out(aub,
                                           ppgtt_lookup(aub, gtt_offset + offset),
                                           block_size,
                                           AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL,
                                           "Trace Block");
      } else {
         dword_out(aub, CMD_AUB_TRACE_HEADER_BLOCK |
                        ((aub->addr_bits > 32 ? 6 : 5) - 2));
         dword_out(aub, AUB_TRACE_MEMTYPE_GTT |
                        type | AUB_TRACE_OP_DATA_WRITE);
         dword_out(aub, subtype);
         dword_out(aub, gtt_offset + offset);
         dword_out(aub, align_u32(block_size, 4));
         if (aub->addr_bits > 32)
            dword_out(aub, (gtt_offset + offset) >> 32);
      }

      if (virtual)
         data_out(aub, ((char *) virtual) + offset, block_size);
      else
         data_out(aub, null_block, block_size);

      /* Pad to a multiple of 4 bytes. */
      data_out(aub, null_block, -block_size & 3);
   }
}
731
/* Write a 4-dword MI_BATCH_BUFFER_START pointing at batch_offset into the
 * context's ring, then set the ring head to 0 and tail to 16 so the
 * submission will execute exactly that command.
 *
 * NOTE(review): the head/tail dwords live at ring_addr + 8192 + 20/28 —
 * presumably inside the register state that follows the ring in the GGTT
 * layout; confirm the offsets against gen_context.h / the context image
 * layout. (cs is currently unused here.)
 */
static void
aub_dump_ring_buffer_execlist(struct aub_file *aub,
                              struct aub_hw_context *hw_ctx,
                              const struct engine *cs,
                              uint64_t batch_offset)
{
   mem_trace_memory_write_header_out(aub, hw_ctx->ring_addr, 16,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT,
                                     "RING MI_BATCH_BUFFER_START user");
   dword_out(aub, AUB_MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965 | (3 - 2));
   dword_out(aub, batch_offset & 0xFFFFFFFF);
   dword_out(aub, batch_offset >> 32);
   dword_out(aub, 0 /* MI_NOOP */);

   mem_trace_memory_write_header_out(aub, hw_ctx->ring_addr + 8192 + 20, 4,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT,
                                     "RING BUFFER HEAD");
   dword_out(aub, 0); /* RING_BUFFER_HEAD */
   mem_trace_memory_write_header_out(aub, hw_ctx->ring_addr + 8192 + 28, 4,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT,
                                     "RING BUFFER TAIL");
   dword_out(aub, 16); /* RING_BUFFER_TAIL */
}
755
/* Submit a context descriptor to an engine's execlist and then emit a
 * register-poll packet on the engine's execlist status register so the
 * simulator waits for completion.
 *
 * Gen11+ uses the ELSQ registers plus a control-register kick; gen8-10
 * writes the ELSP port four times (two zero entries, then the descriptor
 * hi/lo). The poll mask/value differ per gen — taken as-is from the
 * hardware programming sequence, not re-derived here.
 */
static void
aub_dump_execlist(struct aub_file *aub, const struct engine *cs, uint64_t descriptor)
{
   if (aub->devinfo.gen >= 11) {
      register_write_out(aub, cs->elsq_reg, descriptor & 0xFFFFFFFF);
      register_write_out(aub, cs->elsq_reg + sizeof(uint32_t), descriptor >> 32);
      register_write_out(aub, cs->control_reg, 1);
   } else {
      register_write_out(aub, cs->elsp_reg, 0);
      register_write_out(aub, cs->elsp_reg, 0);
      register_write_out(aub, cs->elsp_reg, descriptor >> 32);
      register_write_out(aub, cs->elsp_reg, descriptor & 0xFFFFFFFF);
   }

   dword_out(aub, CMD_MEM_TRACE_REGISTER_POLL | (5 + 1 - 1));
   dword_out(aub, cs->status_reg);
   dword_out(aub, AUB_MEM_TRACE_REGISTER_SIZE_DWORD |
                  AUB_MEM_TRACE_REGISTER_SPACE_MMIO);
   if (aub->devinfo.gen >= 11) {
      dword_out(aub, 0x00000001); /* mask lo */
      dword_out(aub, 0x00000000); /* mask hi */
      dword_out(aub, 0x00000001);
   } else {
      dword_out(aub, 0x00000010); /* mask lo */
      dword_out(aub, 0x00000000); /* mask hi */
      dword_out(aub, 0x00000000);
   }
}
784
/* Legacy (pre-execlist) submission: build a tiny ring containing a single
 * MI_BATCH_BUFFER_START to batch_offset and write it to the engine's ring
 * at `offset` via a trace block — which also triggers execution in the
 * simulator. The batch address is 1 or 2 dwords wide depending on
 * addr_bits (see aub_write_reloc).
 */
static void
aub_dump_ring_buffer_legacy(struct aub_file *aub,
                            uint64_t batch_offset,
                            uint64_t offset,
                            enum drm_i915_gem_engine_class engine_class)
{
   /* 4096 dwords of scratch; only ring_count dwords are actually emitted. */
   uint32_t ringbuffer[4096];
   unsigned aub_mi_bbs_len;
   int ring_count = 0;
   static const int engine_class_to_ring[] = {
      [I915_ENGINE_CLASS_RENDER] = AUB_TRACE_TYPE_RING_PRB0,
      [I915_ENGINE_CLASS_VIDEO]  = AUB_TRACE_TYPE_RING_PRB1,
      [I915_ENGINE_CLASS_COPY]   = AUB_TRACE_TYPE_RING_PRB2,
   };
   int ring = engine_class_to_ring[engine_class];

   /* Make a ring buffer to execute our batchbuffer. */
   memset(ringbuffer, 0, sizeof(ringbuffer));

   aub_mi_bbs_len = aub->addr_bits > 32 ? 3 : 2;
   ringbuffer[ring_count] = AUB_MI_BATCH_BUFFER_START | (aub_mi_bbs_len - 2);
   aub_write_reloc(&aub->devinfo, &ringbuffer[ring_count + 1], batch_offset);
   ring_count += aub_mi_bbs_len;

   /* Write out the ring. This appears to trigger execution of
    * the ring in the simulator.
    */
   dword_out(aub, CMD_AUB_TRACE_HEADER_BLOCK |
                  ((aub->addr_bits > 32 ? 6 : 5) - 2));
   dword_out(aub, AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
   dword_out(aub, 0); /* general/surface subtype */
   dword_out(aub, offset);
   dword_out(aub, ring_count * 4);
   if (aub->addr_bits > 32)
      dword_out(aub, offset >> 32);

   data_out(aub, ringbuffer, ring_count * 4);
}
823
824 static void
825 aub_write_ensure_hwsp(struct aub_file *aub,
826 enum drm_i915_gem_engine_class engine_class)
827 {
828 uint64_t *hwsp_addr = &aub->engine_setup[engine_class].hwsp_addr;
829
830 if (*hwsp_addr != 0)
831 return;
832
833 *hwsp_addr = alloc_ggtt_address(aub, 4096);
834 write_hwsp(aub, engine_class);
835 }
836
/* Record the execution of a batch buffer at batch_addr on the given engine.
 *
 * Execlist mode: lazily set up the context and HWSP, point the ring at the
 * batch and submit its descriptor through the execlist registers. Legacy
 * mode: write a one-command ring at `offset` (ctx_id is unused there).
 * Flushes the output so the trace is usable even if we crash later.
 */
void
aub_write_exec(struct aub_file *aub, uint32_t ctx_id, uint64_t batch_addr,
               uint64_t offset, enum drm_i915_gem_engine_class engine_class)
{
   const struct engine *cs = engine_from_engine_class(engine_class);

   if (aub_use_execlists(aub)) {
      struct aub_hw_context *hw_ctx =
         aub_write_ensure_context(aub, ctx_id, engine_class);
      uint64_t descriptor = get_context_descriptor(aub, cs, hw_ctx);
      aub_write_ensure_hwsp(aub, engine_class);
      aub_dump_ring_buffer_execlist(aub, hw_ctx, cs, batch_addr);
      aub_dump_execlist(aub, cs, descriptor);
   } else {
      /* Dump ring buffer */
      aub_dump_ring_buffer_legacy(aub, batch_addr, offset, engine_class);
   }
   fflush(aub->file);
}
856
/* Submit an externally-provided context image (at GGTT address
 * context_addr) on the given engine, e.g. one captured from an error state.
 *
 * NOTE(review): bit 62 is OR'ed into the descriptor here but not in
 * get_context_descriptor() — confirm its meaning against the execlist
 * context-descriptor layout before relying on it.
 */
void
aub_write_context_execlists(struct aub_file *aub, uint64_t context_addr,
                            enum drm_i915_gem_engine_class engine_class)
{
   const struct engine *cs = engine_from_engine_class(engine_class);
   uint64_t descriptor = ((uint64_t)1 << 62 | context_addr | CONTEXT_FLAGS);
   aub_dump_execlist(aub, cs, descriptor);
}