2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 #include "aub_write.h"
32 #include "drm-uapi/i915_drm.h"
33 #include "intel_aub.h"
34 #include "gen_context.h"
37 #define ALIGN(x, y) (((x) + (y)-1) & ~((y)-1))
40 #define MI_BATCH_NON_SECURE_I965 (1 << 8)
/* Type-generic min/max (GNU statement expressions).  Each argument is
 * evaluated exactly once, avoiding the double-evaluation hazard of the
 * naive ternary macro. */
#define min(a, b) ({                            \
   __typeof(a) _a = (a);                        \
   __typeof(b) _b = (b);                        \
   _a < _b ? _a : _b;                           \
})

#define max(a, b) ({                            \
   __typeof(a) _a = (a);                        \
   __typeof(b) _b = (b);                        \
   _a > _b ? _a : _b;                           \
})
55 mem_trace_memory_write_header_out(struct aub_file
*aub
, uint64_t addr
,
56 uint32_t len
, uint32_t addr_space
,
59 static void __attribute__ ((format(__printf__
, 2, 3)))
60 fail_if(int cond
, const char *format
, ...)
67 va_start(args
, format
);
68 vfprintf(stderr
, format
, args
);
/* Round v up to the next multiple of a (a must be a power of two). */
static inline uint32_t
align_u32(uint32_t v, uint32_t a)
{
   uint32_t mask = a - 1;
   return (v + mask) & ~mask;
}
81 aub_ppgtt_table_finish(struct aub_ppgtt_table
*table
, int level
)
86 for (unsigned i
= 0; i
< ARRAY_SIZE(table
->subtables
); i
++) {
87 if (table
->subtables
[i
]) {
88 aub_ppgtt_table_finish(table
->subtables
[i
], level
- 1);
89 free(table
->subtables
[i
]);
95 data_out(struct aub_file
*aub
, const void *data
, size_t size
)
100 fail_if(fwrite(data
, 1, size
, aub
->file
) == 0,
101 "Writing to output failed\n");
/* Emit a single 32-bit word into the AUB stream. */
static void
dword_out(struct aub_file *aub, uint32_t data)
{
   data_out(aub, &data, sizeof(data));
}
111 write_execlists_header(struct aub_file
*aub
, const char *name
)
113 char app_name
[8 * 4];
114 int app_name_len
, dwords
;
117 snprintf(app_name
, sizeof(app_name
), "PCI-ID=0x%X %s",
119 app_name_len
= ALIGN(app_name_len
, sizeof(uint32_t));
121 dwords
= 5 + app_name_len
/ sizeof(uint32_t);
122 dword_out(aub
, CMD_MEM_TRACE_VERSION
| (dwords
- 1));
123 dword_out(aub
, AUB_MEM_TRACE_VERSION_FILE_VERSION
);
124 dword_out(aub
, aub
->devinfo
.simulator_id
<< AUB_MEM_TRACE_VERSION_DEVICE_SHIFT
);
125 dword_out(aub
, 0); /* version */
126 dword_out(aub
, 0); /* version */
127 data_out(aub
, app_name
, app_name_len
);
131 write_legacy_header(struct aub_file
*aub
, const char *name
)
133 char app_name
[8 * 4];
135 int comment_len
, comment_dwords
, dwords
;
137 comment_len
= snprintf(comment
, sizeof(comment
), "PCI-ID=0x%x", aub
->pci_id
);
138 comment_dwords
= ((comment_len
+ 3) / 4);
140 /* Start with a (required) version packet. */
141 dwords
= 13 + comment_dwords
;
142 dword_out(aub
, CMD_AUB_HEADER
| (dwords
- 2));
143 dword_out(aub
, (4 << AUB_HEADER_MAJOR_SHIFT
) |
144 (0 << AUB_HEADER_MINOR_SHIFT
));
146 /* Next comes a 32-byte application name. */
147 strncpy(app_name
, name
, sizeof(app_name
));
148 app_name
[sizeof(app_name
) - 1] = 0;
149 data_out(aub
, app_name
, sizeof(app_name
));
151 dword_out(aub
, 0); /* timestamp */
152 dword_out(aub
, 0); /* timestamp */
153 dword_out(aub
, comment_len
);
154 data_out(aub
, comment
, comment_dwords
* 4);
/* Emit the stream header in the format matching the submission model:
 * the memory-trace (execlist) header when execlists are used, the legacy
 * AUB header otherwise. */
static void
aub_write_header(struct aub_file *aub, const char *app_name)
{
   if (aub_use_execlists(aub))
      write_execlists_header(aub, app_name);
   else
      write_legacy_header(aub, app_name);
}
168 aub_file_init(struct aub_file
*aub
, FILE *file
, FILE *debug
, uint16_t pci_id
, const char *app_name
)
170 memset(aub
, 0, sizeof(*aub
));
172 aub
->verbose_log_file
= debug
;
174 aub
->pci_id
= pci_id
;
175 fail_if(!gen_get_device_info_from_pci_id(pci_id
, &aub
->devinfo
),
176 "failed to identify chipset=0x%x\n", pci_id
);
177 aub
->addr_bits
= aub
->devinfo
.gen
>= 8 ? 48 : 32;
179 aub_write_header(aub
, app_name
);
181 aub
->phys_addrs_allocator
= 0;
182 aub
->ggtt_addrs_allocator
= 0;
183 aub
->pml4
.phys_addr
= aub
->phys_addrs_allocator
++ << 12;
185 mem_trace_memory_write_header_out(aub
, aub
->ggtt_addrs_allocator
++,
187 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT_ENTRY
,
192 aub
->next_context_handle
= 1;
196 aub_file_finish(struct aub_file
*aub
)
198 aub_ppgtt_table_finish(&aub
->pml4
, 4);
203 aub_gtt_size(struct aub_file
*aub
)
205 return NUM_PT_ENTRIES
* (aub
->addr_bits
> 32 ? GEN8_PTE_SIZE
: PTE_SIZE
);
209 mem_trace_memory_write_header_out(struct aub_file
*aub
, uint64_t addr
,
210 uint32_t len
, uint32_t addr_space
,
213 uint32_t dwords
= ALIGN(len
, sizeof(uint32_t)) / sizeof(uint32_t);
215 if (aub
->verbose_log_file
) {
216 fprintf(aub
->verbose_log_file
,
217 " MEM WRITE (0x%016" PRIx64
"-0x%016" PRIx64
") %s\n",
218 addr
, addr
+ len
, desc
);
221 dword_out(aub
, CMD_MEM_TRACE_MEMORY_WRITE
| (5 + dwords
- 1));
222 dword_out(aub
, addr
& 0xFFFFFFFF); /* addr lo */
223 dword_out(aub
, addr
>> 32); /* addr hi */
224 dword_out(aub
, addr_space
); /* gtt */
229 register_write_out(struct aub_file
*aub
, uint32_t addr
, uint32_t value
)
233 if (aub
->verbose_log_file
) {
234 fprintf(aub
->verbose_log_file
,
235 " MMIO WRITE (0x%08x = 0x%08x)\n", addr
, value
);
238 dword_out(aub
, CMD_MEM_TRACE_REGISTER_WRITE
| (5 + dwords
- 1));
239 dword_out(aub
, addr
);
240 dword_out(aub
, AUB_MEM_TRACE_REGISTER_SIZE_DWORD
|
241 AUB_MEM_TRACE_REGISTER_SPACE_MMIO
);
242 dword_out(aub
, 0xFFFFFFFF); /* mask lo */
243 dword_out(aub
, 0x00000000); /* mask hi */
244 dword_out(aub
, value
);
248 populate_ppgtt_table(struct aub_file
*aub
, struct aub_ppgtt_table
*table
,
249 int start
, int end
, int level
)
251 uint64_t entries
[512] = {0};
252 int dirty_start
= 512, dirty_end
= 0;
254 if (aub
->verbose_log_file
) {
255 fprintf(aub
->verbose_log_file
,
256 " PPGTT (0x%016" PRIx64
"), lvl %d, start: %x, end: %x\n",
257 table
->phys_addr
, level
, start
, end
);
260 for (int i
= start
; i
<= end
; i
++) {
261 if (!table
->subtables
[i
]) {
262 dirty_start
= min(dirty_start
, i
);
263 dirty_end
= max(dirty_end
, i
);
265 table
->subtables
[i
] =
266 (void *)(aub
->phys_addrs_allocator
++ << 12);
267 if (aub
->verbose_log_file
) {
268 fprintf(aub
->verbose_log_file
,
269 " Adding entry: %x, phys_addr: 0x%016" PRIx64
"\n",
270 i
, (uint64_t)table
->subtables
[i
]);
273 table
->subtables
[i
] =
274 calloc(1, sizeof(struct aub_ppgtt_table
));
275 table
->subtables
[i
]->phys_addr
=
276 aub
->phys_addrs_allocator
++ << 12;
277 if (aub
->verbose_log_file
) {
278 fprintf(aub
->verbose_log_file
,
279 " Adding entry: %x, phys_addr: 0x%016" PRIx64
"\n",
280 i
, table
->subtables
[i
]->phys_addr
);
284 entries
[i
] = 3 /* read/write | present */ |
285 (level
== 1 ? (uint64_t)table
->subtables
[i
] :
286 table
->subtables
[i
]->phys_addr
);
289 if (dirty_start
<= dirty_end
) {
290 uint64_t write_addr
= table
->phys_addr
+ dirty_start
*
292 uint64_t write_size
= (dirty_end
- dirty_start
+ 1) *
294 mem_trace_memory_write_header_out(aub
, write_addr
, write_size
,
295 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL
,
297 data_out(aub
, entries
+ dirty_start
, write_size
);
302 aub_map_ppgtt(struct aub_file
*aub
, uint64_t start
, uint64_t size
)
304 uint64_t l4_start
= start
& 0xff8000000000;
305 uint64_t l4_end
= ((start
+ size
- 1) | 0x007fffffffff) & 0xffffffffffff;
307 #define L4_index(addr) (((addr) >> 39) & 0x1ff)
308 #define L3_index(addr) (((addr) >> 30) & 0x1ff)
309 #define L2_index(addr) (((addr) >> 21) & 0x1ff)
310 #define L1_index(addr) (((addr) >> 12) & 0x1ff)
312 #define L3_table(addr) (aub->pml4.subtables[L4_index(addr)])
313 #define L2_table(addr) (L3_table(addr)->subtables[L3_index(addr)])
314 #define L1_table(addr) (L2_table(addr)->subtables[L2_index(addr)])
316 if (aub
->verbose_log_file
) {
317 fprintf(aub
->verbose_log_file
,
318 " Mapping PPGTT address: 0x%" PRIx64
", size: %" PRIu64
"\n",
322 populate_ppgtt_table(aub
, &aub
->pml4
, L4_index(l4_start
), L4_index(l4_end
), 4);
324 for (uint64_t l4
= l4_start
; l4
< l4_end
; l4
+= (1ULL << 39)) {
325 uint64_t l3_start
= max(l4
, start
& 0xffffc0000000);
326 uint64_t l3_end
= min(l4
+ (1ULL << 39) - 1,
327 ((start
+ size
- 1) | 0x00003fffffff) & 0xffffffffffff);
328 uint64_t l3_start_idx
= L3_index(l3_start
);
329 uint64_t l3_end_idx
= L3_index(l3_end
);
331 populate_ppgtt_table(aub
, L3_table(l4
), l3_start_idx
, l3_end_idx
, 3);
333 for (uint64_t l3
= l3_start
; l3
< l3_end
; l3
+= (1ULL << 30)) {
334 uint64_t l2_start
= max(l3
, start
& 0xffffffe00000);
335 uint64_t l2_end
= min(l3
+ (1ULL << 30) - 1,
336 ((start
+ size
- 1) | 0x0000001fffff) & 0xffffffffffff);
337 uint64_t l2_start_idx
= L2_index(l2_start
);
338 uint64_t l2_end_idx
= L2_index(l2_end
);
340 populate_ppgtt_table(aub
, L2_table(l3
), l2_start_idx
, l2_end_idx
, 2);
342 for (uint64_t l2
= l2_start
; l2
< l2_end
; l2
+= (1ULL << 21)) {
343 uint64_t l1_start
= max(l2
, start
& 0xfffffffff000);
344 uint64_t l1_end
= min(l2
+ (1ULL << 21) - 1,
345 ((start
+ size
- 1) | 0x000000000fff) & 0xffffffffffff);
346 uint64_t l1_start_idx
= L1_index(l1_start
);
347 uint64_t l1_end_idx
= L1_index(l1_end
);
349 populate_ppgtt_table(aub
, L1_table(l2
), l1_start_idx
, l1_end_idx
, 1);
356 ppgtt_lookup(struct aub_file
*aub
, uint64_t ppgtt_addr
)
358 return (uint64_t)L1_table(ppgtt_addr
)->subtables
[L1_index(ppgtt_addr
)];
361 static const struct engine
{
363 enum drm_i915_gem_engine_class engine_class
;
368 uint32_t control_reg
;
370 [I915_ENGINE_CLASS_RENDER
] = {
372 .engine_class
= I915_ENGINE_CLASS_RENDER
,
374 .elsp_reg
= EXECLIST_SUBMITPORT_RCSUNIT
,
375 .elsq_reg
= EXECLIST_SQ_CONTENTS0_RCSUNIT
,
376 .status_reg
= EXECLIST_STATUS_RCSUNIT
,
377 .control_reg
= EXECLIST_CONTROL_RCSUNIT
,
379 [I915_ENGINE_CLASS_VIDEO
] = {
381 .engine_class
= I915_ENGINE_CLASS_VIDEO
,
383 .elsp_reg
= EXECLIST_SUBMITPORT_VCSUNIT0
,
384 .elsq_reg
= EXECLIST_SQ_CONTENTS0_VCSUNIT0
,
385 .status_reg
= EXECLIST_STATUS_VCSUNIT0
,
386 .control_reg
= EXECLIST_CONTROL_VCSUNIT0
,
388 [I915_ENGINE_CLASS_COPY
] = {
390 .engine_class
= I915_ENGINE_CLASS_COPY
,
392 .elsp_reg
= EXECLIST_SUBMITPORT_BCSUNIT
,
393 .elsq_reg
= EXECLIST_SQ_CONTENTS0_BCSUNIT
,
394 .status_reg
= EXECLIST_STATUS_BCSUNIT
,
395 .control_reg
= EXECLIST_CONTROL_BCSUNIT
,
400 aub_map_ggtt(struct aub_file
*aub
, uint64_t virt_addr
, uint64_t size
)
402 /* Makes the code below a bit simpler. In practice all of the write we
403 * receive from error2aub are page aligned.
405 assert(virt_addr
% 4096 == 0);
406 assert((aub
->phys_addrs_allocator
+ size
) < (1UL << 32));
409 uint32_t ggtt_ptes
= DIV_ROUND_UP(size
, 4096);
410 uint64_t phys_addr
= aub
->phys_addrs_allocator
<< 12;
411 aub
->phys_addrs_allocator
+= ggtt_ptes
;
413 if (aub
->verbose_log_file
) {
414 fprintf(aub
->verbose_log_file
,
415 " Mapping GGTT address: 0x%" PRIx64
", size: %" PRIu64
" phys_addr=0x%" PRIx64
" entries=%u\n",
416 virt_addr
, size
, phys_addr
, ggtt_ptes
);
419 mem_trace_memory_write_header_out(aub
,
420 (virt_addr
>> 12) * GEN8_PTE_SIZE
,
421 ggtt_ptes
* GEN8_PTE_SIZE
,
422 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT_ENTRY
,
424 for (uint32_t i
= 0; i
< ggtt_ptes
; i
++) {
425 dword_out(aub
, 1 + phys_addr
+ i
* 4096);
431 aub_write_ggtt(struct aub_file
*aub
, uint64_t virt_addr
, uint64_t size
, const void *data
)
433 /* Default setup assumes a 1 to 1 mapping between physical and virtual GGTT
434 * addresses. This is somewhat incompatible with the aub_write_ggtt()
435 * function. In practice it doesn't matter as the GGTT writes are used to
436 * replace the default setup and we've taken care to setup the PML4 as the
439 assert(!aub
->has_default_setup
);
441 aub_map_ggtt(aub
, virt_addr
, size
);
443 /* We write the GGTT buffer through the GGTT aub command rather than the
444 * PHYSICAL aub command. This is because the Gen9 simulator seems to have 2
445 * different set of memory pools for GGTT and physical (probably someone
446 * didn't really understand the concept?).
448 static const char null_block
[8 * 4096];
449 for (uint64_t offset
= 0; offset
< size
; offset
+= 4096) {
450 uint32_t block_size
= min(4096, size
- offset
);
452 mem_trace_memory_write_header_out(aub
, virt_addr
+ offset
, block_size
,
453 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
,
455 data_out(aub
, (char *) data
+ offset
, block_size
);
457 /* Pad to a multiple of 4 bytes. */
458 data_out(aub
, null_block
, -block_size
& 3);
462 static const struct engine
*
463 engine_from_engine_class(enum drm_i915_gem_engine_class engine_class
)
465 switch (engine_class
) {
466 case I915_ENGINE_CLASS_RENDER
:
467 case I915_ENGINE_CLASS_COPY
:
468 case I915_ENGINE_CLASS_VIDEO
:
469 return &engines
[engine_class
];
471 unreachable("unknown ring");
476 get_context_init(const struct gen_device_info
*devinfo
,
477 const struct gen_context_parameters
*params
,
478 enum drm_i915_gem_engine_class engine_class
,
482 static const gen_context_init_t gen8_contexts
[] = {
483 [I915_ENGINE_CLASS_RENDER
] = gen8_render_context_init
,
484 [I915_ENGINE_CLASS_COPY
] = gen8_blitter_context_init
,
485 [I915_ENGINE_CLASS_VIDEO
] = gen8_video_context_init
,
487 static const gen_context_init_t gen10_contexts
[] = {
488 [I915_ENGINE_CLASS_RENDER
] = gen10_render_context_init
,
489 [I915_ENGINE_CLASS_COPY
] = gen10_blitter_context_init
,
490 [I915_ENGINE_CLASS_VIDEO
] = gen10_video_context_init
,
493 assert(devinfo
->gen
>= 8);
495 if (devinfo
->gen
<= 10)
496 gen8_contexts
[engine_class
](params
, data
, size
);
498 gen10_contexts
[engine_class
](params
, data
, size
);
502 alloc_ggtt_address(struct aub_file
*aub
, uint64_t size
)
504 uint32_t ggtt_ptes
= DIV_ROUND_UP(size
, 4096);
505 uint64_t addr
= aub
->ggtt_addrs_allocator
<< 12;
507 aub
->ggtt_addrs_allocator
+= ggtt_ptes
;
508 aub_map_ggtt(aub
, addr
, size
);
514 write_hwsp(struct aub_file
*aub
,
515 enum drm_i915_gem_engine_class engine_class
)
518 switch (engine_class
) {
519 case I915_ENGINE_CLASS_RENDER
: reg
= HWS_PGA_RCSUNIT
; break;
520 case I915_ENGINE_CLASS_COPY
: reg
= HWS_PGA_BCSUNIT
; break;
521 case I915_ENGINE_CLASS_VIDEO
: reg
= HWS_PGA_VCSUNIT0
; break;
523 unreachable("unknown ring");
526 register_write_out(aub
, reg
, aub
->engine_setup
[engine_class
].hwsp_addr
);
530 write_engine_execlist_setup(struct aub_file
*aub
,
532 struct aub_hw_context
*hw_ctx
,
533 enum drm_i915_gem_engine_class engine_class
)
535 const struct engine
*cs
= engine_from_engine_class(engine_class
);
536 uint32_t context_size
;
538 get_context_init(&aub
->devinfo
, NULL
, engine_class
, NULL
, &context_size
);
541 uint32_t total_size
= RING_SIZE
+ PPHWSP_SIZE
+ context_size
;
543 uint64_t ggtt_addr
= alloc_ggtt_address(aub
, total_size
);
545 snprintf(name
, sizeof(name
), "%s (ctx id: %d) GGTT PT", cs
->name
, ctx_id
);
548 hw_ctx
->ring_addr
= ggtt_addr
;
549 snprintf(name
, sizeof(name
), "%s RING", cs
->name
);
550 mem_trace_memory_write_header_out(aub
, ggtt_addr
, RING_SIZE
,
551 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
,
553 for (uint32_t i
= 0; i
< RING_SIZE
; i
+= sizeof(uint32_t))
555 ggtt_addr
+= RING_SIZE
;
558 hw_ctx
->pphwsp_addr
= ggtt_addr
;
559 snprintf(name
, sizeof(name
), "%s PPHWSP", cs
->name
);
560 mem_trace_memory_write_header_out(aub
, ggtt_addr
,
561 PPHWSP_SIZE
+ context_size
,
562 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
,
564 for (uint32_t i
= 0; i
< PPHWSP_SIZE
; i
+= sizeof(uint32_t))
568 struct gen_context_parameters params
= {
569 .ring_addr
= hw_ctx
->ring_addr
,
570 .ring_size
= RING_SIZE
,
571 .pml4_addr
= aub
->pml4
.phys_addr
,
573 uint32_t *context_data
= calloc(1, context_size
);
574 get_context_init(&aub
->devinfo
, ¶ms
, engine_class
, context_data
, &context_size
);
575 data_out(aub
, context_data
, context_size
);
578 hw_ctx
->initialized
= true;
584 write_execlists_default_setup(struct aub_file
*aub
)
586 register_write_out(aub
, GFX_MODE_RCSUNIT
, 0x80008000 /* execlist enable */);
587 register_write_out(aub
, GFX_MODE_VCSUNIT0
, 0x80008000 /* execlist enable */);
588 register_write_out(aub
, GFX_MODE_BCSUNIT
, 0x80008000 /* execlist enable */);
591 static void write_legacy_default_setup(struct aub_file
*aub
)
593 uint32_t entry
= 0x200003;
595 /* Set up the GTT. The max we can handle is 64M */
596 dword_out(aub
, CMD_AUB_TRACE_HEADER_BLOCK
|
597 ((aub
->addr_bits
> 32 ? 6 : 5) - 2));
598 dword_out(aub
, AUB_TRACE_MEMTYPE_GTT_ENTRY
|
599 AUB_TRACE_TYPE_NOTYPE
| AUB_TRACE_OP_DATA_WRITE
);
600 dword_out(aub
, 0); /* subtype */
601 dword_out(aub
, 0); /* offset */
602 dword_out(aub
, aub_gtt_size(aub
)); /* size */
603 if (aub
->addr_bits
> 32)
605 for (uint32_t i
= 0; i
< NUM_PT_ENTRIES
; i
++) {
606 dword_out(aub
, entry
+ 0x1000 * i
);
607 if (aub
->addr_bits
> 32)
613 * Sets up a default GGTT/PPGTT address space and execlists context (when
617 aub_write_default_setup(struct aub_file
*aub
)
619 if (aub_use_execlists(aub
))
620 write_execlists_default_setup(aub
);
622 write_legacy_default_setup(aub
);
624 aub
->has_default_setup
= true;
627 static struct aub_context
*
628 aub_context_new(struct aub_file
*aub
, uint32_t new_id
)
630 assert(aub
->num_contexts
< MAX_CONTEXT_COUNT
);
632 struct aub_context
*ctx
= &aub
->contexts
[aub
->num_contexts
++];
633 memset(ctx
, 0, sizeof(*ctx
));
640 aub_write_context_create(struct aub_file
*aub
, uint32_t *ctx_id
)
642 uint32_t new_id
= ctx_id
? *ctx_id
: aub
->next_context_handle
;
644 aub_context_new(aub
, new_id
);
647 aub
->next_context_handle
++;
652 static struct aub_context
*
653 aub_context_find(struct aub_file
*aub
, uint32_t id
)
655 for (int i
= 0; i
< aub
->num_contexts
; i
++) {
656 if (aub
->contexts
[i
].id
== id
)
657 return &aub
->contexts
[i
];
663 static struct aub_hw_context
*
664 aub_write_ensure_context(struct aub_file
*aub
, uint32_t ctx_id
,
665 enum drm_i915_gem_engine_class engine_class
)
667 struct aub_context
*ctx
= aub_context_find(aub
, ctx_id
);
670 struct aub_hw_context
*hw_ctx
= &ctx
->hw_contexts
[engine_class
];
671 if (!hw_ctx
->initialized
)
672 write_engine_execlist_setup(aub
, ctx
->id
, hw_ctx
, engine_class
);
678 get_context_descriptor(struct aub_file
*aub
,
679 const struct engine
*cs
,
680 struct aub_hw_context
*hw_ctx
)
682 return cs
->hw_class
| hw_ctx
->pphwsp_addr
| CONTEXT_FLAGS
;
686 * Break up large objects into multiple writes. Otherwise a 128kb VBO
687 * would overflow the 16 bits of size field in the packet header and
688 * everything goes badly after that.
691 aub_write_trace_block(struct aub_file
*aub
,
692 uint32_t type
, void *virtual,
693 uint32_t size
, uint64_t gtt_offset
)
696 uint32_t subtype
= 0;
697 static const char null_block
[8 * 4096];
699 for (uint32_t offset
= 0; offset
< size
; offset
+= block_size
) {
700 block_size
= min(8 * 4096, size
- offset
);
702 if (aub_use_execlists(aub
)) {
703 block_size
= min(4096, block_size
);
704 mem_trace_memory_write_header_out(aub
,
705 ppgtt_lookup(aub
, gtt_offset
+ offset
),
707 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL
,
710 dword_out(aub
, CMD_AUB_TRACE_HEADER_BLOCK
|
711 ((aub
->addr_bits
> 32 ? 6 : 5) - 2));
712 dword_out(aub
, AUB_TRACE_MEMTYPE_GTT
|
713 type
| AUB_TRACE_OP_DATA_WRITE
);
714 dword_out(aub
, subtype
);
715 dword_out(aub
, gtt_offset
+ offset
);
716 dword_out(aub
, align_u32(block_size
, 4));
717 if (aub
->addr_bits
> 32)
718 dword_out(aub
, (gtt_offset
+ offset
) >> 32);
722 data_out(aub
, ((char *) virtual) + offset
, block_size
);
724 data_out(aub
, null_block
, block_size
);
726 /* Pad to a multiple of 4 bytes. */
727 data_out(aub
, null_block
, -block_size
& 3);
732 aub_dump_ring_buffer_execlist(struct aub_file
*aub
,
733 struct aub_hw_context
*hw_ctx
,
734 const struct engine
*cs
,
735 uint64_t batch_offset
)
737 mem_trace_memory_write_header_out(aub
, hw_ctx
->ring_addr
, 16,
738 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
,
739 "RING MI_BATCH_BUFFER_START user");
740 dword_out(aub
, AUB_MI_BATCH_BUFFER_START
| MI_BATCH_NON_SECURE_I965
| (3 - 2));
741 dword_out(aub
, batch_offset
& 0xFFFFFFFF);
742 dword_out(aub
, batch_offset
>> 32);
743 dword_out(aub
, 0 /* MI_NOOP */);
745 mem_trace_memory_write_header_out(aub
, hw_ctx
->ring_addr
+ 8192 + 20, 4,
746 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
,
748 dword_out(aub
, 0); /* RING_BUFFER_HEAD */
749 mem_trace_memory_write_header_out(aub
, hw_ctx
->ring_addr
+ 8192 + 28, 4,
750 AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT
,
752 dword_out(aub
, 16); /* RING_BUFFER_TAIL */
756 aub_dump_execlist(struct aub_file
*aub
, const struct engine
*cs
, uint64_t descriptor
)
758 if (aub
->devinfo
.gen
>= 11) {
759 register_write_out(aub
, cs
->elsq_reg
, descriptor
& 0xFFFFFFFF);
760 register_write_out(aub
, cs
->elsq_reg
+ sizeof(uint32_t), descriptor
>> 32);
761 register_write_out(aub
, cs
->control_reg
, 1);
763 register_write_out(aub
, cs
->elsp_reg
, 0);
764 register_write_out(aub
, cs
->elsp_reg
, 0);
765 register_write_out(aub
, cs
->elsp_reg
, descriptor
>> 32);
766 register_write_out(aub
, cs
->elsp_reg
, descriptor
& 0xFFFFFFFF);
769 dword_out(aub
, CMD_MEM_TRACE_REGISTER_POLL
| (5 + 1 - 1));
770 dword_out(aub
, cs
->status_reg
);
771 dword_out(aub
, AUB_MEM_TRACE_REGISTER_SIZE_DWORD
|
772 AUB_MEM_TRACE_REGISTER_SPACE_MMIO
);
773 if (aub
->devinfo
.gen
>= 11) {
774 dword_out(aub
, 0x00000001); /* mask lo */
775 dword_out(aub
, 0x00000000); /* mask hi */
776 dword_out(aub
, 0x00000001);
778 dword_out(aub
, 0x00000010); /* mask lo */
779 dword_out(aub
, 0x00000000); /* mask hi */
780 dword_out(aub
, 0x00000000);
785 aub_dump_ring_buffer_legacy(struct aub_file
*aub
,
786 uint64_t batch_offset
,
788 enum drm_i915_gem_engine_class engine_class
)
790 uint32_t ringbuffer
[4096];
791 unsigned aub_mi_bbs_len
;
793 static const int engine_class_to_ring
[] = {
794 [I915_ENGINE_CLASS_RENDER
] = AUB_TRACE_TYPE_RING_PRB0
,
795 [I915_ENGINE_CLASS_VIDEO
] = AUB_TRACE_TYPE_RING_PRB1
,
796 [I915_ENGINE_CLASS_COPY
] = AUB_TRACE_TYPE_RING_PRB2
,
798 int ring
= engine_class_to_ring
[engine_class
];
800 /* Make a ring buffer to execute our batchbuffer. */
801 memset(ringbuffer
, 0, sizeof(ringbuffer
));
803 aub_mi_bbs_len
= aub
->addr_bits
> 32 ? 3 : 2;
804 ringbuffer
[ring_count
] = AUB_MI_BATCH_BUFFER_START
| (aub_mi_bbs_len
- 2);
805 aub_write_reloc(&aub
->devinfo
, &ringbuffer
[ring_count
+ 1], batch_offset
);
806 ring_count
+= aub_mi_bbs_len
;
808 /* Write out the ring. This appears to trigger execution of
809 * the ring in the simulator.
811 dword_out(aub
, CMD_AUB_TRACE_HEADER_BLOCK
|
812 ((aub
->addr_bits
> 32 ? 6 : 5) - 2));
813 dword_out(aub
, AUB_TRACE_MEMTYPE_GTT
| ring
| AUB_TRACE_OP_COMMAND_WRITE
);
814 dword_out(aub
, 0); /* general/surface subtype */
815 dword_out(aub
, offset
);
816 dword_out(aub
, ring_count
* 4);
817 if (aub
->addr_bits
> 32)
818 dword_out(aub
, offset
>> 32);
820 data_out(aub
, ringbuffer
, ring_count
* 4);
824 aub_write_ensure_hwsp(struct aub_file
*aub
,
825 enum drm_i915_gem_engine_class engine_class
)
827 uint64_t *hwsp_addr
= &aub
->engine_setup
[engine_class
].hwsp_addr
;
832 *hwsp_addr
= alloc_ggtt_address(aub
, 4096);
833 write_hwsp(aub
, engine_class
);
837 aub_write_exec(struct aub_file
*aub
, uint32_t ctx_id
, uint64_t batch_addr
,
838 uint64_t offset
, enum drm_i915_gem_engine_class engine_class
)
840 const struct engine
*cs
= engine_from_engine_class(engine_class
);
842 if (aub_use_execlists(aub
)) {
843 struct aub_hw_context
*hw_ctx
=
844 aub_write_ensure_context(aub
, ctx_id
, engine_class
);
845 uint64_t descriptor
= get_context_descriptor(aub
, cs
, hw_ctx
);
846 aub_write_ensure_hwsp(aub
, engine_class
);
847 aub_dump_ring_buffer_execlist(aub
, hw_ctx
, cs
, batch_addr
);
848 aub_dump_execlist(aub
, cs
, descriptor
);
850 /* Dump ring buffer */
851 aub_dump_ring_buffer_legacy(aub
, batch_addr
, offset
, engine_class
);
857 aub_write_context_execlists(struct aub_file
*aub
, uint64_t context_addr
,
858 enum drm_i915_gem_engine_class engine_class
)
860 const struct engine
*cs
= engine_from_engine_class(engine_class
);
861 uint64_t descriptor
= ((uint64_t)1 << 62 | context_addr
| CONTEXT_FLAGS
);
862 aub_dump_execlist(aub
, cs
, descriptor
);