2 * Copyright © 2015 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 #include "aub_write.h"
32 #include "drm-uapi/i915_drm.h"
33 #include "intel_aub.h"
34 #include "gen_context.h"
/* Round `x` up to the next multiple of `y` (y must be a power of two). */
#define ALIGN(x, y) (((x) + (y)-1) & ~((y)-1))

#define MI_BATCH_NON_SECURE_I965 (1 << 8)

/* Single-evaluation min/max using GNU statement expressions + __typeof.
 * The visible source was truncated mid-macro; the result expression and
 * closing "})" are restored here in the canonical form. */
#define min(a, b) ({                            \
      __typeof(a) _a = (a);                     \
      __typeof(b) _b = (b);                     \
      _a < _b ? _a : _b;                        \
   })

#define max(a, b) ({                            \
      __typeof(a) _a = (a);                     \
      __typeof(b) _b = (b);                     \
      _a > _b ? _a : _b;                        \
   })
/* Forward declarations for helpers defined later in this file. */
static struct aub_context *aub_context_new(struct aub_file *aub, uint32_t new_id);
/* NOTE(review): the trailing parameter(s) of this prototype (a
 * description string, judging from the definition below) are not
 * visible in this extraction — confirm against the definition. */
static void mem_trace_memory_write_header_out(struct aub_file *aub, uint64_t addr,
                                              uint32_t len, uint32_t addr_space,
/* printf-style error helper: formats `format` to stderr when `cond` is
 * true.  Checked by the compiler like printf (args start at position 2).
 * NOTE(review): only part of the body is visible here — the va_list
 * declaration, the early return on !cond, va_end and the abort/trap
 * call are not shown in this extraction. */
static void __attribute__ ((format(__printf__, 2, 3)))
fail_if(int cond, const char *format, ...)
   va_start(args, format);
   vfprintf(stderr, format, args);
/* Round `value` up to the next multiple of `alignment`.
 * `alignment` must be a power of two. */
static inline uint32_t
align_u32(uint32_t value, uint32_t alignment)
{
   uint32_t mask = alignment - 1;
   return (value + mask) & ~mask;
}
/* Recursively free a PPGTT page-table hierarchy, depth first.
 * NOTE(review): a guard such as `if (level == 1) return;` is expected
 * before this loop but is not visible in this extraction — at level 1
 * subtables[] holds encoded physical addresses (see
 * populate_ppgtt_table), not heap pointers, so freeing them would be
 * wrong.  Confirm against the full source. */
aub_ppgtt_table_finish(struct aub_ppgtt_table *table, int level)
   for (unsigned i = 0; i < ARRAY_SIZE(table->subtables); i++) {
      if (table->subtables[i]) {
         /* Release children before the entry that points at them. */
         aub_ppgtt_table_finish(table->subtables[i], level - 1);
         free(table->subtables[i]);
95 data_out(struct aub_file
*aub
, const void *data
, size_t size
)
100 fail_if(fwrite(data
, 1, size
, aub
->file
) == 0,
101 "Writing to output failed\n");
/* Append one 32-bit dword (in host byte order) to the output stream. */
static void
dword_out(struct aub_file *aub, uint32_t data)
{
   data_out(aub, &data, sizeof(uint32_t));
}
/* Emit the memtrace-style (execlist era) AUB file header: a
 * CMD_MEM_TRACE_VERSION packet followed by a dword-aligned
 * "PCI-ID=... <name>" application string.
 * NOTE(review): the assignment capturing snprintf's return into
 * app_name_len and the snprintf continuation arguments are not visible
 * in this extraction. */
write_execlists_header(struct aub_file *aub, const char *name)
   char app_name[8 * 4];
   int app_name_len, dwords;

   snprintf(app_name, sizeof(app_name), "PCI-ID=0x%X %s",
   /* Pad the string length to a whole number of dwords. */
   app_name_len = ALIGN(app_name_len, sizeof(uint32_t));

   /* 5 fixed dwords + the application-name payload. */
   dwords = 5 + app_name_len / sizeof(uint32_t);
   dword_out(aub, CMD_MEM_TRACE_VERSION | (dwords - 1));
   dword_out(aub, AUB_MEM_TRACE_VERSION_FILE_VERSION);
   dword_out(aub, aub->devinfo.simulator_id << AUB_MEM_TRACE_VERSION_DEVICE_SHIFT);
   dword_out(aub, 0);   /* version */
   dword_out(aub, 0);   /* version */
   data_out(aub, app_name, app_name_len);
/* Emit the legacy (pre-execlist) AUB header: CMD_AUB_HEADER packet,
 * 32-byte application name, timestamps and a "PCI-ID=..." comment.
 * NOTE(review): the declaration of the `comment` buffer and some of the
 * fixed header dwords are not visible in this extraction. */
write_legacy_header(struct aub_file *aub, const char *name)
   char app_name[8 * 4];
   int comment_len, comment_dwords, dwords;

   comment_len = snprintf(comment, sizeof(comment), "PCI-ID=0x%x", aub->pci_id);
   comment_dwords = ((comment_len + 3) / 4);   /* round up to dwords */

   /* Start with a (required) version packet. */
   dwords = 13 + comment_dwords;
   dword_out(aub, CMD_AUB_HEADER | (dwords - 2));
   dword_out(aub, (4 << AUB_HEADER_MAJOR_SHIFT) |
                  (0 << AUB_HEADER_MINOR_SHIFT));

   /* Next comes a 32-byte application name. */
   strncpy(app_name, name, sizeof(app_name));
   app_name[sizeof(app_name) - 1] = 0;   /* force NUL termination */
   data_out(aub, app_name, sizeof(app_name));

   dword_out(aub, 0);   /* timestamp */
   dword_out(aub, 0);   /* timestamp */
   dword_out(aub, comment_len);
   data_out(aub, comment, comment_dwords * 4);
/**
 * Write the AUB file header.
 *
 * Execlist-capable platforms get the memtrace-style header; older
 * platforms get the legacy CMD_AUB_HEADER packet.  Exactly one of the
 * two is emitted — the visible code was missing the `else`, which would
 * have written both headers on execlist platforms.
 */
void
aub_write_header(struct aub_file *aub, const char *app_name)
{
   if (aub_use_execlists(aub))
      write_execlists_header(aub, app_name);
   else
      write_legacy_header(aub, app_name);
}
/* Initialize an aub_file: identify the device from `pci_id`, write the
 * file header, reset the physical/GGTT address allocators, reserve the
 * PML4 page and create the default (id 0) context.
 * NOTE(review): the `aub->file = file;` assignment and the trailing
 * size/description arguments of the PML4 GGTT-entry write are not
 * visible in this extraction. */
aub_file_init(struct aub_file *aub, FILE *file, FILE *debug, uint16_t pci_id, const char *app_name)
   memset(aub, 0, sizeof(*aub));

   aub->verbose_log_file = debug;
   aub->pci_id = pci_id;
   fail_if(!gen_get_device_info_from_pci_id(pci_id, &aub->devinfo),
           "failed to identify chipset=0x%x\n", pci_id);
   /* Gen8+ uses 48-bit addressing, earlier gens 32-bit. */
   aub->addr_bits = aub->devinfo.gen >= 8 ? 48 : 32;

   aub_write_header(aub, app_name);

   aub->phys_addrs_allocator = 0;
   aub->ggtt_addrs_allocator = 0;
   /* Physical page 0 backs the PML4 (allocator values are page indices,
    * hence the << 12). */
   aub->pml4.phys_addr = aub->phys_addrs_allocator++ << 12;

   mem_trace_memory_write_header_out(aub, aub->ggtt_addrs_allocator++,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT_ENTRY,

   aub->next_context_handle = 1;
   aub_context_new(aub, 0);   /* Default context */
/* Tear down an aub_file: free the 4-level PPGTT hierarchy.
 * NOTE(review): closing of aub->file is not visible in this
 * extraction — confirm it is handled here or by the caller. */
aub_file_finish(struct aub_file *aub)
   aub_ppgtt_table_finish(&aub->pml4, 4);
204 aub_gtt_size(struct aub_file
*aub
)
206 return NUM_PT_ENTRIES
* (aub
->addr_bits
> 32 ? GEN8_PTE_SIZE
: PTE_SIZE
);
/* Emit a CMD_MEM_TRACE_MEMORY_WRITE packet header for a write of `len`
 * bytes at `addr` in the given address space.  The caller follows up
 * with the payload (via data_out/dword_out).
 * NOTE(review): the trailing description parameter (`desc`, used in the
 * fprintf below) and the final length dword of the packet are not
 * visible in this extraction. */
mem_trace_memory_write_header_out(struct aub_file *aub, uint64_t addr,
                                  uint32_t len, uint32_t addr_space,
   /* Payload length in whole dwords (len rounded up). */
   uint32_t dwords = ALIGN(len, sizeof(uint32_t)) / sizeof(uint32_t);

   if (aub->verbose_log_file) {
      fprintf(aub->verbose_log_file,
              "  MEM WRITE (0x%016" PRIx64 "-0x%016" PRIx64 ") %s\n",
              addr, addr + len, desc);

   dword_out(aub, CMD_MEM_TRACE_MEMORY_WRITE | (5 + dwords - 1));
   dword_out(aub, addr & 0xFFFFFFFF);   /* addr lo */
   dword_out(aub, addr >> 32);   /* addr hi */
   dword_out(aub, addr_space);   /* gtt */
/* Emit a CMD_MEM_TRACE_REGISTER_WRITE packet: a 32-bit MMIO register
 * write of `value` at register offset `addr`, with a full low mask.
 * NOTE(review): the declaration/initialization of `dwords` is not
 * visible in this extraction (a fixed value is expected given the
 * single data dword that follows the masks). */
register_write_out(struct aub_file *aub, uint32_t addr, uint32_t value)
   if (aub->verbose_log_file) {
      fprintf(aub->verbose_log_file,
              "  MMIO WRITE (0x%08x = 0x%08x)\n", addr, value);

   dword_out(aub, CMD_MEM_TRACE_REGISTER_WRITE | (5 + dwords - 1));
   dword_out(aub, addr);
   dword_out(aub, AUB_MEM_TRACE_REGISTER_SIZE_DWORD |
                  AUB_MEM_TRACE_REGISTER_SPACE_MMIO);
   dword_out(aub, 0xFFFFFFFF);   /* mask lo */
   dword_out(aub, 0x00000000);   /* mask hi */
   dword_out(aub, value);
/* Ensure page-table entries [start, end] of `table` (one level of the
 * 4-level PPGTT) are populated, allocating backing pages for any that
 * are missing, then write only the dirtied slice of the table to the
 * AUB stream as a PHYSICAL-space memory write.
 * NOTE(review): this extraction is missing the branch scaffolding that
 * separates the level==1 case (entry stores an encoded physical
 * address cast to a pointer) from the level>1 case (entry stores a
 * heap-allocated aub_ppgtt_table), as well as the multiplier operands
 * (entry size) in write_addr/write_size and the trailing description
 * argument — confirm against the full source. */
populate_ppgtt_table(struct aub_file *aub, struct aub_ppgtt_table *table,
                     int start, int end, int level)
   uint64_t entries[512] = {0};
   /* Track the span of entries we actually touch so only that slice is
    * written out. */
   int dirty_start = 512, dirty_end = 0;

   if (aub->verbose_log_file) {
      fprintf(aub->verbose_log_file,
              "  PPGTT (0x%016" PRIx64 "), lvl %d, start: %x, end: %x\n",
              table->phys_addr, level, start, end);

   for (int i = start; i <= end; i++) {
      if (!table->subtables[i]) {
         dirty_start = min(dirty_start, i);
         dirty_end = max(dirty_end, i);
         /* Level 1: the "subtable" is really the allocated physical
          * page address, encoded in the pointer value. */
         table->subtables[i] =
            (void *)(aub->phys_addrs_allocator++ << 12);
         if (aub->verbose_log_file) {
            fprintf(aub->verbose_log_file,
                    "   Adding entry: %x, phys_addr: 0x%016" PRIx64 "\n",
                    i, (uint64_t)table->subtables[i]);
         /* Levels > 1: allocate a real child table and give it a fresh
          * physical page. */
         table->subtables[i] =
            calloc(1, sizeof(struct aub_ppgtt_table));
         table->subtables[i]->phys_addr =
            aub->phys_addrs_allocator++ << 12;
         if (aub->verbose_log_file) {
            fprintf(aub->verbose_log_file,
                    "   Adding entry: %x, phys_addr: 0x%016" PRIx64 "\n",
                    i, table->subtables[i]->phys_addr);
      /* Bits 0-1: read/write | present. */
      entries[i] = 3 /* read/write | present */ |
         (level == 1 ? (uint64_t)table->subtables[i] :
          table->subtables[i]->phys_addr);

   if (dirty_start <= dirty_end) {
      uint64_t write_addr = table->phys_addr + dirty_start *
      uint64_t write_size = (dirty_end - dirty_start + 1) *
      mem_trace_memory_write_header_out(aub, write_addr, write_size,
                                        AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL,
      data_out(aub, entries + dirty_start, write_size);
/* Map [start, start+size) in the PPGTT by populating all four page
 * table levels.  Each nested loop walks one level: the outer bounds are
 * clamped to the requested range, converted to table indices, and the
 * corresponding table slice is populated before descending.
 * NOTE(review): closing braces of the nested loops are not visible in
 * this extraction. */
aub_map_ppgtt(struct aub_file *aub, uint64_t start, uint64_t size)
   /* Range rounded out to level-4 (512GB-entry) boundaries. */
   uint64_t l4_start = start & 0xff8000000000;
   uint64_t l4_end = ((start + size - 1) | 0x007fffffffff) & 0xffffffffffff;

   /* Index of a 48-bit address at each page-table level. */
#define L4_index(addr) (((addr) >> 39) & 0x1ff)
#define L3_index(addr) (((addr) >> 30) & 0x1ff)
#define L2_index(addr) (((addr) >> 21) & 0x1ff)
#define L1_index(addr) (((addr) >> 12) & 0x1ff)

   /* Table containing the entry for `addr` at each level. */
#define L3_table(addr) (aub->pml4.subtables[L4_index(addr)])
#define L2_table(addr) (L3_table(addr)->subtables[L3_index(addr)])
#define L1_table(addr) (L2_table(addr)->subtables[L2_index(addr)])

   if (aub->verbose_log_file) {
      fprintf(aub->verbose_log_file,
              "  Mapping PPGTT address: 0x%" PRIx64 ", size: %" PRIu64 "\n",

   populate_ppgtt_table(aub, &aub->pml4, L4_index(l4_start), L4_index(l4_end), 4);

   for (uint64_t l4 = l4_start; l4 < l4_end; l4 += (1ULL << 39)) {
      /* Clamp this level-3 span to the requested range. */
      uint64_t l3_start = max(l4, start & 0xffffc0000000);
      uint64_t l3_end = min(l4 + (1ULL << 39) - 1,
                            ((start + size - 1) | 0x00003fffffff) & 0xffffffffffff);
      uint64_t l3_start_idx = L3_index(l3_start);
      uint64_t l3_end_idx = L3_index(l3_end);

      populate_ppgtt_table(aub, L3_table(l4), l3_start_idx, l3_end_idx, 3);

      for (uint64_t l3 = l3_start; l3 < l3_end; l3 += (1ULL << 30)) {
         uint64_t l2_start = max(l3, start & 0xffffffe00000);
         uint64_t l2_end = min(l3 + (1ULL << 30) - 1,
                               ((start + size - 1) | 0x0000001fffff) & 0xffffffffffff);
         uint64_t l2_start_idx = L2_index(l2_start);
         uint64_t l2_end_idx = L2_index(l2_end);

         populate_ppgtt_table(aub, L2_table(l3), l2_start_idx, l2_end_idx, 2);

         for (uint64_t l2 = l2_start; l2 < l2_end; l2 += (1ULL << 21)) {
            uint64_t l1_start = max(l2, start & 0xfffffffff000);
            uint64_t l1_end = min(l2 + (1ULL << 21) - 1,
                                  ((start + size - 1) | 0x000000000fff) & 0xffffffffffff);
            uint64_t l1_start_idx = L1_index(l1_start);
            uint64_t l1_end_idx = L1_index(l1_end);

            populate_ppgtt_table(aub, L1_table(l2), l1_start_idx, l1_end_idx, 1);
357 ppgtt_lookup(struct aub_file
*aub
, uint64_t ppgtt_addr
)
359 return (uint64_t)L1_table(ppgtt_addr
)->subtables
[L1_index(ppgtt_addr
)];
/* Static per-engine-class description table: the execlist submission
 * registers for each supported command streamer, indexed by
 * drm_i915_gem_engine_class.
 * NOTE(review): several struct fields (at least a name string and the
 * elsp/elsq/status register declarations) and the closing braces of
 * each initializer are not visible in this extraction. */
static const struct engine {
   enum drm_i915_gem_engine_class engine_class;
   uint32_t control_reg;
   [I915_ENGINE_CLASS_RENDER] = {
      .engine_class = I915_ENGINE_CLASS_RENDER,
      .elsp_reg = EXECLIST_SUBMITPORT_RCSUNIT,
      .elsq_reg = EXECLIST_SQ_CONTENTS0_RCSUNIT,
      .status_reg = EXECLIST_STATUS_RCSUNIT,
      .control_reg = EXECLIST_CONTROL_RCSUNIT,
   [I915_ENGINE_CLASS_VIDEO] = {
      .engine_class = I915_ENGINE_CLASS_VIDEO,
      .elsp_reg = EXECLIST_SUBMITPORT_VCSUNIT0,
      .elsq_reg = EXECLIST_SQ_CONTENTS0_VCSUNIT0,
      .status_reg = EXECLIST_STATUS_VCSUNIT0,
      .control_reg = EXECLIST_CONTROL_VCSUNIT0,
   [I915_ENGINE_CLASS_COPY] = {
      .engine_class = I915_ENGINE_CLASS_COPY,
      .elsp_reg = EXECLIST_SUBMITPORT_BCSUNIT,
      .elsq_reg = EXECLIST_SQ_CONTENTS0_BCSUNIT,
      .status_reg = EXECLIST_STATUS_BCSUNIT,
      .control_reg = EXECLIST_CONTROL_BCSUNIT,
/* Map [virt_addr, virt_addr+size) in the GGTT to freshly allocated
 * physical pages by writing the corresponding PTEs to the AUB stream.
 * NOTE(review): the trailing description argument of the header write
 * is not visible in this extraction. */
aub_map_ggtt(struct aub_file *aub, uint64_t virt_addr, uint64_t size)
   /* Makes the code below a bit simpler. In practice all of the write we
    * receive from error2aub are page aligned. */
   assert(virt_addr % 4096 == 0);
   /* Physical allocator must stay within 32-bit page-address range. */
   assert((aub->phys_addrs_allocator + size) < (1UL << 32));

   uint32_t ggtt_ptes = DIV_ROUND_UP(size, 4096);
   uint64_t phys_addr = aub->phys_addrs_allocator << 12;
   aub->phys_addrs_allocator += ggtt_ptes;

   if (aub->verbose_log_file) {
      fprintf(aub->verbose_log_file,
              "  Mapping GGTT address: 0x%" PRIx64 ", size: %" PRIu64 " phys_addr=0x%" PRIx64 " entries=%u\n",
              virt_addr, size, phys_addr, ggtt_ptes);

   mem_trace_memory_write_header_out(aub,
                                     (virt_addr >> 12) * GEN8_PTE_SIZE,
                                     ggtt_ptes * GEN8_PTE_SIZE,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT_ENTRY,
   for (uint32_t i = 0; i < ggtt_ptes; i++) {
      /* +1 sets the "present" bit of each PTE. */
      dword_out(aub, 1 + phys_addr + i * 4096);
/* Map a GGTT range and write `size` bytes of `data` into it, one page
 * at a time, padding each write to dword alignment.
 * NOTE(review): the trailing description argument of the per-page
 * header write is not visible in this extraction. */
aub_write_ggtt(struct aub_file *aub, uint64_t virt_addr, uint64_t size, const void *data)
   /* Default setup assumes a 1 to 1 mapping between physical and virtual GGTT
    * addresses. This is somewhat incompatible with the aub_write_ggtt()
    * function. In practice it doesn't matter as the GGTT writes are used to
    * replace the default setup and we've taken care to setup the PML4 as the
    * ... */
   assert(!aub->has_default_setup);

   aub_map_ggtt(aub, virt_addr, size);

   /* We write the GGTT buffer through the GGTT aub command rather than the
    * PHYSICAL aub command. This is because the Gen9 simulator seems to have 2
    * different set of memory pools for GGTT and physical (probably someone
    * didn't really understand the concept?). */
   static const char null_block[8 * 4096];
   for (uint64_t offset = 0; offset < size; offset += 4096) {
      uint32_t block_size = min(4096, size - offset);

      mem_trace_memory_write_header_out(aub, virt_addr + offset, block_size,
                                        AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT,
      data_out(aub, (char *) data + offset, block_size);

      /* Pad to a multiple of 4 bytes. */
      data_out(aub, null_block, -block_size & 3);
463 static const struct engine
*
464 engine_from_engine_class(enum drm_i915_gem_engine_class engine_class
)
466 switch (engine_class
) {
467 case I915_ENGINE_CLASS_RENDER
:
468 case I915_ENGINE_CLASS_COPY
:
469 case I915_ENGINE_CLASS_VIDEO
:
470 return &engines
[engine_class
];
472 unreachable("unknown ring");
/* Fill `data`/`size` with the default HW context image for the given
 * engine class, dispatching to the per-gen init function tables.
 * NOTE(review): this extraction is missing the output parameters
 * (`data`, `size`) from the signature and the `else` between the two
 * dispatch calls — as shown, both tables would be invoked for
 * gen <= 10.  Confirm against the full source. */
get_context_init(const struct gen_device_info *devinfo,
                 const struct gen_context_parameters *params,
                 enum drm_i915_gem_engine_class engine_class,
   static const gen_context_init_t gen8_contexts[] = {
      [I915_ENGINE_CLASS_RENDER] = gen8_render_context_init,
      [I915_ENGINE_CLASS_COPY] = gen8_blitter_context_init,
      [I915_ENGINE_CLASS_VIDEO] = gen8_video_context_init,
   static const gen_context_init_t gen10_contexts[] = {
      [I915_ENGINE_CLASS_RENDER] = gen10_render_context_init,
      [I915_ENGINE_CLASS_COPY] = gen10_blitter_context_init,
      [I915_ENGINE_CLASS_VIDEO] = gen10_video_context_init,

   /* Pre-gen8 devices have no HW contexts in this format. */
   assert(devinfo->gen >= 8);

   if (devinfo->gen <= 10)
      gen8_contexts[engine_class](params, data, size);
      gen10_contexts[engine_class](params, data, size);
503 alloc_ggtt_address(struct aub_file
*aub
, uint64_t size
)
505 uint32_t ggtt_ptes
= DIV_ROUND_UP(size
, 4096);
506 uint64_t addr
= aub
->ggtt_addrs_allocator
<< 12;
508 aub
->ggtt_addrs_allocator
+= ggtt_ptes
;
509 aub_map_ggtt(aub
, addr
, size
);
/* Point the engine's hardware status page register (HWS_PGA) at the
 * GGTT address previously stored in aub->engine_setup[].hwsp_addr.
 * NOTE(review): the declaration of `reg` and the `default:` label for
 * the unreachable() are not visible in this extraction. */
write_hwsp(struct aub_file *aub,
           enum drm_i915_gem_engine_class engine_class)
   switch (engine_class) {
   case I915_ENGINE_CLASS_RENDER: reg = HWS_PGA_RCSUNIT; break;
   case I915_ENGINE_CLASS_COPY: reg = HWS_PGA_BCSUNIT; break;
   case I915_ENGINE_CLASS_VIDEO: reg = HWS_PGA_VCSUNIT0; break;
      unreachable("unknown ring");

   register_write_out(aub, reg, aub->engine_setup[engine_class].hwsp_addr);
/* Lay out and write the execlist state for one engine of a context:
 * a GGTT allocation holding the ring buffer, the PPHWSP and the HW
 * context image, then initialize the context image from the per-gen
 * template with this context's ring/PML4 addresses.
 * NOTE(review): this extraction is missing the ctx_id parameter, the
 * declaration of the `name` buffer, the zero-fill dword_out loop
 * bodies, remaining gen_context_parameters fields and the free of
 * context_data — confirm against the full source. */
write_engine_execlist_setup(struct aub_file *aub,
                            struct aub_hw_context *hw_ctx,
                            enum drm_i915_gem_engine_class engine_class)
   const struct engine *cs = engine_from_engine_class(engine_class);
   uint32_t context_size;

   /* First call only queries the context image size. */
   get_context_init(&aub->devinfo, NULL, engine_class, NULL, &context_size);

   /* GGTT layout: [ring buffer][PPHWSP][context image]. */
   uint32_t total_size = RING_SIZE + PPHWSP_SIZE + context_size;

   uint64_t ggtt_addr = alloc_ggtt_address(aub, total_size);

   snprintf(name, sizeof(name), "%s (ctx id: %d) GGTT PT", cs->name, ctx_id);

   /* Ring buffer. */
   hw_ctx->ring_addr = ggtt_addr;
   snprintf(name, sizeof(name), "%s RING", cs->name);
   mem_trace_memory_write_header_out(aub, ggtt_addr, RING_SIZE,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT,
   for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
   ggtt_addr += RING_SIZE;

   /* PPHWSP followed immediately by the context image. */
   hw_ctx->pphwsp_addr = ggtt_addr;
   snprintf(name, sizeof(name), "%s PPHWSP", cs->name);
   mem_trace_memory_write_header_out(aub, ggtt_addr,
                                     PPHWSP_SIZE + context_size,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT,
   for (uint32_t i = 0; i < PPHWSP_SIZE; i += sizeof(uint32_t))

   /* Second call fills the context image with these parameters. */
   struct gen_context_parameters params = {
      .ring_addr = hw_ctx->ring_addr,
      .ring_size = RING_SIZE,
      .pml4_addr = aub->pml4.phys_addr,

   uint32_t *context_data = calloc(1, context_size);
   get_context_init(&aub->devinfo, &params, engine_class, context_data, &context_size);
   data_out(aub, context_data, context_size);

   hw_ctx->initialized = true;
585 write_execlists_default_setup(struct aub_file
*aub
)
587 register_write_out(aub
, GFX_MODE_RCSUNIT
, 0x80008000 /* execlist enable */);
588 register_write_out(aub
, GFX_MODE_VCSUNIT0
, 0x80008000 /* execlist enable */);
589 register_write_out(aub
, GFX_MODE_BCSUNIT
, 0x80008000 /* execlist enable */);
/* Emit a default legacy GTT: a CMD_AUB_TRACE_HEADER_BLOCK covering the
 * whole table followed by one present PTE per page.
 * NOTE(review): the dwords emitted under both `addr_bits > 32` branches
 * (the high-address words) are not visible in this extraction. */
static void write_legacy_default_setup(struct aub_file *aub)
   /* First PTE value: physical page 0x200000 | valid bits. */
   uint32_t entry = 0x200003;

   /* Set up the GTT. The max we can handle is 64M */
   dword_out(aub, CMD_AUB_TRACE_HEADER_BLOCK |
                  ((aub->addr_bits > 32 ? 6 : 5) - 2));
   dword_out(aub, AUB_TRACE_MEMTYPE_GTT_ENTRY |
                  AUB_TRACE_TYPE_NOTYPE | AUB_TRACE_OP_DATA_WRITE);
   dword_out(aub, 0);   /* subtype */
   dword_out(aub, 0);   /* offset */
   dword_out(aub, aub_gtt_size(aub));   /* size */
   if (aub->addr_bits > 32)
   for (uint32_t i = 0; i < NUM_PT_ENTRIES; i++) {
      /* Consecutive pages: each PTE 0x1000 above the previous. */
      dword_out(aub, entry + 0x1000 * i);
      if (aub->addr_bits > 32)
614 * Sets up a default GGTT/PPGTT address space and execlists context (when
618 aub_write_default_setup(struct aub_file
*aub
)
620 if (aub_use_execlists(aub
))
621 write_execlists_default_setup(aub
);
623 write_legacy_default_setup(aub
);
625 aub
->has_default_setup
= true;
/* Allocate the next slot in aub->contexts[] and zero it.
 * NOTE(review): the assignment of `new_id` into the context and the
 * `return ctx;` are not visible in this extraction — confirm against
 * the full source. */
static struct aub_context *
aub_context_new(struct aub_file *aub, uint32_t new_id)
   assert(aub->num_contexts < MAX_CONTEXT_COUNT);

   struct aub_context *ctx = &aub->contexts[aub->num_contexts++];
   memset(ctx, 0, sizeof(*ctx));
/* Create a new context.  With a NULL `ctx_id` the next internal handle
 * is used; otherwise the caller-provided id is taken as-is.
 * NOTE(review): the condition guarding the next_context_handle
 * increment (expected: only when ctx_id is NULL) and the return of the
 * new id are not visible in this extraction. */
aub_write_context_create(struct aub_file *aub, uint32_t *ctx_id)
   uint32_t new_id = ctx_id ? *ctx_id : aub->next_context_handle;

   aub_context_new(aub, new_id);

   aub->next_context_handle++;
653 static struct aub_context
*
654 aub_context_find(struct aub_file
*aub
, uint32_t id
)
656 for (int i
= 0; i
< aub
->num_contexts
; i
++) {
657 if (aub
->contexts
[i
].id
== id
)
658 return &aub
->contexts
[i
];
/* Return the hardware context of `ctx_id` for the given engine class,
 * writing its execlist setup on first use.
 * NOTE(review): the NULL-check/assert on `ctx` and the `return hw_ctx;`
 * are not visible in this extraction — confirm against the full
 * source. */
static struct aub_hw_context *
aub_write_ensure_context(struct aub_file *aub, uint32_t ctx_id,
                         enum drm_i915_gem_engine_class engine_class)
   struct aub_context *ctx = aub_context_find(aub, ctx_id);

   struct aub_hw_context *hw_ctx = &ctx->hw_contexts[engine_class];
   if (!hw_ctx->initialized)
      write_engine_execlist_setup(aub, ctx->id, hw_ctx, engine_class);
679 get_context_descriptor(struct aub_file
*aub
,
680 const struct engine
*cs
,
681 struct aub_hw_context
*hw_ctx
)
683 return cs
->hw_class
| hw_ctx
->pphwsp_addr
| CONTEXT_FLAGS
;
/*
 * Break up large objects into multiple writes. Otherwise a 128kb VBO
 * would overflow the 16 bits of size field in the packet header and
 * everything goes badly after that.
 *
 * NOTE(review): this extraction is missing the `block_size`
 * declaration, the `else` scaffolding between the execlist and legacy
 * header paths, and the `if (virtual) ... else ...` split around the
 * two data_out payload calls — confirm against the full source.
 */
aub_write_trace_block(struct aub_file *aub,
                      uint32_t type, void *virtual,
                      uint32_t size, uint64_t gtt_offset)
   uint32_t subtype = 0;
   static const char null_block[8 * 4096];

   for (uint32_t offset = 0; offset < size; offset += block_size) {
      block_size = min(8 * 4096, size - offset);

      if (aub_use_execlists(aub)) {
         /* Execlist path: one page at a time, addressed through the
          * PPGTT translation of the GTT offset. */
         block_size = min(4096, block_size);
         mem_trace_memory_write_header_out(aub,
                                           ppgtt_lookup(aub, gtt_offset + offset),
                                           AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL,
         /* Legacy path: CMD_AUB_TRACE_HEADER_BLOCK packet. */
         dword_out(aub, CMD_AUB_TRACE_HEADER_BLOCK |
                        ((aub->addr_bits > 32 ? 6 : 5) - 2));
         dword_out(aub, AUB_TRACE_MEMTYPE_GTT |
                        type | AUB_TRACE_OP_DATA_WRITE);
         dword_out(aub, subtype);
         dword_out(aub, gtt_offset + offset);
         dword_out(aub, align_u32(block_size, 4));
         if (aub->addr_bits > 32)
            dword_out(aub, (gtt_offset + offset) >> 32);

      data_out(aub, ((char *) virtual) + offset, block_size);
      data_out(aub, null_block, block_size);

      /* Pad to a multiple of 4 bytes. */
      data_out(aub, null_block, -block_size & 3);
/* Write an MI_BATCH_BUFFER_START pointing at `batch_offset` into the
 * context's ring buffer, then set the ring HEAD to 0 and TAIL to 16 in
 * the context image (which sits 8192 bytes past the ring in the GGTT
 * allocation made by write_engine_execlist_setup).
 * NOTE(review): the description arguments of the HEAD/TAIL header
 * writes are not visible in this extraction. */
aub_dump_ring_buffer_execlist(struct aub_file *aub,
                              struct aub_hw_context *hw_ctx,
                              const struct engine *cs,
                              uint64_t batch_offset)
   mem_trace_memory_write_header_out(aub, hw_ctx->ring_addr, 16,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT,
                                     "RING MI_BATCH_BUFFER_START user");
   dword_out(aub, AUB_MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965 | (3 - 2));
   dword_out(aub, batch_offset & 0xFFFFFFFF);
   dword_out(aub, batch_offset >> 32);
   dword_out(aub, 0 /* MI_NOOP */);

   mem_trace_memory_write_header_out(aub, hw_ctx->ring_addr + 8192 + 20, 4,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT,
   dword_out(aub, 0);   /* RING_BUFFER_HEAD */
   mem_trace_memory_write_header_out(aub, hw_ctx->ring_addr + 8192 + 28, 4,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT,
   dword_out(aub, 16);   /* RING_BUFFER_TAIL */
/* Submit `descriptor` to the engine: gen11+ uses the ELSQ registers
 * plus a control-register kick, older gens write four dwords to the
 * ELSP submit port.  Then poll the execlist status register until the
 * submission is observed (different status bits per gen).
 * NOTE(review): the `} else {` scaffolding of both gen branches is not
 * visible in this extraction. */
aub_dump_execlist(struct aub_file *aub, const struct engine *cs, uint64_t descriptor)
   if (aub->devinfo.gen >= 11) {
      register_write_out(aub, cs->elsq_reg, descriptor & 0xFFFFFFFF);
      register_write_out(aub, cs->elsq_reg + sizeof(uint32_t), descriptor >> 32);
      register_write_out(aub, cs->control_reg, 1);
      /* Pre-gen11: two empty slots then the descriptor, high dword
       * first, through the submit port. */
      register_write_out(aub, cs->elsp_reg, 0);
      register_write_out(aub, cs->elsp_reg, 0);
      register_write_out(aub, cs->elsp_reg, descriptor >> 32);
      register_write_out(aub, cs->elsp_reg, descriptor & 0xFFFFFFFF);

   /* Poll the execlist status register. */
   dword_out(aub, CMD_MEM_TRACE_REGISTER_POLL | (5 + 1 - 1));
   dword_out(aub, cs->status_reg);
   dword_out(aub, AUB_MEM_TRACE_REGISTER_SIZE_DWORD |
                  AUB_MEM_TRACE_REGISTER_SPACE_MMIO);
   if (aub->devinfo.gen >= 11) {
      dword_out(aub, 0x00000001);   /* mask lo */
      dword_out(aub, 0x00000000);   /* mask hi */
      dword_out(aub, 0x00000001);
      dword_out(aub, 0x00000010);   /* mask lo */
      dword_out(aub, 0x00000000);   /* mask hi */
      dword_out(aub, 0x00000000);
/* Legacy (pre-execlist) submission: build a small ring buffer holding
 * an MI_BATCH_BUFFER_START at `batch_offset` and write it to the
 * engine's ring; the simulator starts executing on this write.
 * NOTE(review): this extraction is missing a `uint64_t offset`
 * parameter (referenced below) and the `ring_count` initialization —
 * confirm against the full source. */
aub_dump_ring_buffer_legacy(struct aub_file *aub,
                            uint64_t batch_offset,
                            enum drm_i915_gem_engine_class engine_class)
   uint32_t ringbuffer[4096];
   unsigned aub_mi_bbs_len;
   /* Engine class -> legacy ring identifier in the trace packet. */
   static const int engine_class_to_ring[] = {
      [I915_ENGINE_CLASS_RENDER] = AUB_TRACE_TYPE_RING_PRB0,
      [I915_ENGINE_CLASS_VIDEO] = AUB_TRACE_TYPE_RING_PRB1,
      [I915_ENGINE_CLASS_COPY] = AUB_TRACE_TYPE_RING_PRB2,
   int ring = engine_class_to_ring[engine_class];

   /* Make a ring buffer to execute our batchbuffer. */
   memset(ringbuffer, 0, sizeof(ringbuffer));

   /* MI_BATCH_BUFFER_START is 3 dwords with 64-bit addresses, 2 with
    * 32-bit. */
   aub_mi_bbs_len = aub->addr_bits > 32 ? 3 : 2;
   ringbuffer[ring_count] = AUB_MI_BATCH_BUFFER_START | (aub_mi_bbs_len - 2);
   aub_write_reloc(&aub->devinfo, &ringbuffer[ring_count + 1], batch_offset);
   ring_count += aub_mi_bbs_len;

   /* Write out the ring. This appears to trigger execution of
    * the ring in the simulator.
    */
   dword_out(aub, CMD_AUB_TRACE_HEADER_BLOCK |
                  ((aub->addr_bits > 32 ? 6 : 5) - 2));
   dword_out(aub, AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
   dword_out(aub, 0);   /* general/surface subtype */
   dword_out(aub, offset);
   dword_out(aub, ring_count * 4);
   if (aub->addr_bits > 32)
      dword_out(aub, offset >> 32);

   data_out(aub, ringbuffer, ring_count * 4);
/* Allocate the engine's hardware status page (one GGTT page) and point
 * the HWS_PGA register at it.
 * NOTE(review): an early return when the HWSP was already allocated
 * (hwsp_addr non-zero) is expected before the allocation but is not
 * visible in this extraction — confirm against the full source. */
aub_write_ensure_hwsp(struct aub_file *aub,
                      enum drm_i915_gem_engine_class engine_class)
   uint64_t *hwsp_addr = &aub->engine_setup[engine_class].hwsp_addr;

   *hwsp_addr = alloc_ggtt_address(aub, 4096);

   write_hwsp(aub, engine_class);
/* Submit a batch buffer at `batch_addr` for execution on the given
 * engine: execlist submission (context + descriptor + ELSP/ELSQ) on
 * capable platforms, legacy ring-buffer submission otherwise.
 * NOTE(review): the `} else {` between the execlist and legacy paths
 * is not visible in this extraction. */
aub_write_exec(struct aub_file *aub, uint32_t ctx_id, uint64_t batch_addr,
               uint64_t offset, enum drm_i915_gem_engine_class engine_class)
   const struct engine *cs = engine_from_engine_class(engine_class);

   if (aub_use_execlists(aub)) {
      struct aub_hw_context *hw_ctx =
         aub_write_ensure_context(aub, ctx_id, engine_class);
      uint64_t descriptor = get_context_descriptor(aub, cs, hw_ctx);
      aub_write_ensure_hwsp(aub, engine_class);
      aub_dump_ring_buffer_execlist(aub, hw_ctx, cs, batch_addr);
      aub_dump_execlist(aub, cs, descriptor);
      /* Dump ring buffer */
      aub_dump_ring_buffer_legacy(aub, batch_addr, offset, engine_class);
858 aub_write_context_execlists(struct aub_file
*aub
, uint64_t context_addr
,
859 enum drm_i915_gem_engine_class engine_class
)
861 const struct engine
*cs
= engine_from_engine_class(engine_class
);
862 uint64_t descriptor
= ((uint64_t)1 << 62 | context_addr
| CONTEXT_FLAGS
);
863 aub_dump_execlist(aub
, cs
, descriptor
);