/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * Batchbuffer and command submission module.
 *
 * Every API draw call results in a number of GPU commands, which we
 * collect into a "batch buffer". Typically, many draw calls are grouped
 * into a single batch to amortize command submission overhead.
 *
 * We submit batches to the kernel using the I915_GEM_EXECBUFFER2 ioctl.
 * One critical piece of data is the "validation list", which contains a
 * list of the buffer objects (BOs) which the commands in the GPU need.
 * The kernel will make sure these are resident and pinned at the correct
 * virtual memory address before executing our batch. If a BO is not in
 * the validation list, it effectively does not exist, so take care.
 */
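
/*
 * As an illustrative sketch only (annotation added for clarity, not code
 * used by this module), a submission handed to the kernel has roughly this
 * shape; submit_batch() near the bottom of this file fills it out for real:
 *
 *    struct drm_i915_gem_exec_object2 validation_list[N];  (one entry per BO)
 *    struct drm_i915_gem_execbuffer2 execbuf = {
 *       .buffers_ptr = (uintptr_t) validation_list,
 *       .buffer_count = N,
 *       ...
 *    };
 *    drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
 */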
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <xf86drm.h>

#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"
#include "iris_fence.h"

#include "drm-uapi/i915_drm.h"

#include "util/hash_table.h"
#include "util/set.h"

#include "main/macros.h"
#define FILE_DEBUG_FLAG DEBUG_BUFMGR
/* Terminating the batch takes either 4 bytes for MI_BATCH_BUFFER_END
 * or 12 bytes for MI_BATCH_BUFFER_START (when chaining). Plus, we may
 * need an extra 4 bytes to pad out to the nearest QWord. So reserve 16.
 */
#define BATCH_RESERVED 16
static void iris_batch_reset(struct iris_batch *batch);
static unsigned
num_fences(struct iris_batch *batch)
{
   return util_dynarray_num_elements(&batch->exec_fences,
                                     struct drm_i915_gem_exec_fence);
}
/**
 * Debugging code to dump the fence list, used by INTEL_DEBUG=submit.
 */
static void
dump_fence_list(struct iris_batch *batch)
{
   fprintf(stderr, "Fence list (length %u): ", num_fences(batch));

   util_dynarray_foreach(&batch->exec_fences,
                         struct drm_i915_gem_exec_fence, f) {
      fprintf(stderr, "%s%u%s ",
              (f->flags & I915_EXEC_FENCE_WAIT) ? "..." : "",
              f->handle,
              (f->flags & I915_EXEC_FENCE_SIGNAL) ? "!" : "");
   }

   fprintf(stderr, "\n");
}
/**
 * Debugging code to dump the validation list, used by INTEL_DEBUG=submit.
 */
static void
dump_validation_list(struct iris_batch *batch)
{
   fprintf(stderr, "Validation list (length %d):\n", batch->exec_count);

   for (int i = 0; i < batch->exec_count; i++) {
      uint64_t flags = batch->validation_list[i].flags;
      assert(batch->validation_list[i].handle ==
             batch->exec_bos[i]->gem_handle);
      fprintf(stderr, "[%2d]: %2d %-14s @ 0x%016llx (%"PRIu64"B)\t %2d refs %s\n",
              i,
              batch->validation_list[i].handle,
              batch->exec_bos[i]->name,
              batch->validation_list[i].offset,
              batch->exec_bos[i]->size,
              batch->exec_bos[i]->refcount,
              (flags & EXEC_OBJECT_WRITE) ? " (write)" : "");
   }
}
/**
 * Return BO information to the batch decoder (for debugging).
 */
static struct gen_batch_decode_bo
decode_get_bo(void *v_batch, bool ppgtt, uint64_t address)
{
   struct iris_batch *batch = v_batch;

   assert(ppgtt);

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];
      /* The decoder zeroes out the top 16 bits, so we need to as well */
      uint64_t bo_address = bo->gtt_offset & (~0ull >> 16);

      if (address >= bo_address && address < bo_address + bo->size) {
         return (struct gen_batch_decode_bo) {
            .addr = address,
            .size = bo->size,
            .map = iris_bo_map(batch->dbg, bo, MAP_READ) +
                   (address - bo_address),
         };
      }
   }

   return (struct gen_batch_decode_bo) { };
}
static unsigned
decode_get_state_size(void *v_batch, uint32_t offset_from_base)
{
   struct iris_batch *batch = v_batch;

   /* The decoder gives us offsets from a base address, which is not great.
    * Binding tables are relative to surface state base address, and other
    * state is relative to dynamic state base address. These could alias,
    * but in practice it's unlikely because surface offsets are always in
    * the [0, 64K) range, and we assign dynamic state addresses starting at
    * the top of the 4GB range. We should fix this but it's likely good
    * enough for now.
    */
   unsigned size = (uintptr_t)
      _mesa_hash_table_u64_search(batch->state_sizes, offset_from_base);

   return size;
}
/**
 * Decode the current batch.
 */
static void
decode_batch(struct iris_batch *batch)
{
   void *map = iris_bo_map(batch->dbg, batch->exec_bos[0], MAP_READ);
   gen_print_batch(&batch->decoder, map, batch->primary_batch_size,
                   batch->exec_bos[0]->gtt_offset, false);
}
void
iris_init_batch(struct iris_batch *batch,
                struct iris_screen *screen,
                struct iris_vtable *vtbl,
                struct pipe_debug_callback *dbg,
                struct pipe_device_reset_callback *reset,
                struct hash_table_u64 *state_sizes,
                struct iris_batch *all_batches,
                enum iris_batch_name name,
                uint8_t engine,
                int priority)
{
   batch->screen = screen;
   batch->vtbl = vtbl;
   batch->dbg = dbg;
   batch->reset = reset;
   batch->state_sizes = state_sizes;
   batch->name = name;

   /* engine should be one of I915_EXEC_RENDER, I915_EXEC_BLT, etc. */
   assert((engine & ~I915_EXEC_RING_MASK) == 0);
   assert(util_bitcount(engine) == 1);
   batch->engine = engine;

   batch->hw_ctx_id = iris_create_hw_context(screen->bufmgr);
   assert(batch->hw_ctx_id);

   iris_hw_context_set_priority(screen->bufmgr, batch->hw_ctx_id, priority);

   util_dynarray_init(&batch->exec_fences, ralloc_context(NULL));
   util_dynarray_init(&batch->syncpts, ralloc_context(NULL));

   batch->exec_count = 0;
   batch->exec_array_size = 100;
   batch->exec_bos =
      malloc(batch->exec_array_size * sizeof(batch->exec_bos[0]));
   batch->validation_list =
      malloc(batch->exec_array_size * sizeof(batch->validation_list[0]));

   batch->cache.render = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                                 _mesa_key_pointer_equal);
   batch->cache.depth = _mesa_set_create(NULL, _mesa_hash_pointer,
                                         _mesa_key_pointer_equal);

   memset(batch->other_batches, 0, sizeof(batch->other_batches));

   for (int i = 0, j = 0; i < IRIS_BATCH_COUNT; i++) {
      if (&all_batches[i] != batch)
         batch->other_batches[j++] = &all_batches[i];
   }

   if (unlikely(INTEL_DEBUG)) {
      const unsigned decode_flags =
         GEN_BATCH_DECODE_FULL |
         ((INTEL_DEBUG & DEBUG_COLOR) ? GEN_BATCH_DECODE_IN_COLOR : 0) |
         GEN_BATCH_DECODE_OFFSETS |
         GEN_BATCH_DECODE_FLOATS;

      gen_batch_decode_ctx_init(&batch->decoder, &screen->devinfo,
                                stderr, decode_flags, NULL,
                                decode_get_bo, decode_get_state_size, batch);
      batch->decoder.max_vbo_decoded_lines = 32;
   }

   iris_batch_reset(batch);
}
static struct drm_i915_gem_exec_object2 *
find_validation_entry(struct iris_batch *batch, struct iris_bo *bo)
{
   unsigned index = READ_ONCE(bo->index);
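
   /* Fast path (annotation added for clarity): bo->index caches the BO's
    * last-known slot in a validation list, but it may be stale or refer to
    * another batch, so it is only trusted after re-checking exec_bos[index].
    */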
   if (index < batch->exec_count && batch->exec_bos[index] == bo)
      return &batch->validation_list[index];

   /* May have been shared between multiple active batches */
   for (index = 0; index < batch->exec_count; index++) {
      if (batch->exec_bos[index] == bo)
         return &batch->validation_list[index];
   }

   return NULL;
}
/**
 * Add a buffer to the current batch's validation list.
 *
 * You must call this on any BO you wish to use in this batch, to ensure
 * that it's resident when the GPU commands execute.
 */
void
iris_use_pinned_bo(struct iris_batch *batch,
                   struct iris_bo *bo,
                   bool writable)
{
   assert(bo->kflags & EXEC_OBJECT_PINNED);

   /* Never mark the workaround BO with EXEC_OBJECT_WRITE. We don't care
    * about the order of any writes to that buffer, and marking it writable
    * would introduce data dependencies between multiple batches which share
    * the buffer.
    */
   if (bo == batch->screen->workaround_bo)
      writable = false;

   struct drm_i915_gem_exec_object2 *existing_entry =
      find_validation_entry(batch, bo);

   if (existing_entry) {
      /* The BO is already in the validation list; mark it writable */
      if (writable)
         existing_entry->flags |= EXEC_OBJECT_WRITE;

      return;
   }

   if (bo != batch->bo) {
      /* This is the first time our batch has seen this BO. Before we use it,
       * we may need to flush and synchronize with other batches.
       */
      for (int b = 0; b < ARRAY_SIZE(batch->other_batches); b++) {
         struct drm_i915_gem_exec_object2 *other_entry =
            find_validation_entry(batch->other_batches[b], bo);

         /* If the buffer is referenced by another batch, and either batch
          * intends to write it, then flush the other batch and synchronize.
          *
          * Consider these cases:
          *
          * 1. They read, we read   =>  No synchronization required.
          * 2. They read, we write  =>  Synchronize (they need the old value)
          * 3. They write, we read  =>  Synchronize (we need their new value)
          * 4. They write, we write =>  Synchronize (order writes)
          *
          * The read/read case is very common, as multiple batches usually
          * share a streaming state buffer or shader assembly buffer, and
          * we want to avoid synchronizing in this case.
          */
         if (other_entry &&
             ((other_entry->flags & EXEC_OBJECT_WRITE) || writable)) {
            iris_batch_flush(batch->other_batches[b]);
            iris_batch_add_syncpt(batch, batch->other_batches[b]->last_syncpt,
                                  I915_EXEC_FENCE_WAIT);
         }
      }
   }

   /* Now, take a reference and add it to the validation list. */
   iris_bo_reference(bo);

   if (batch->exec_count == batch->exec_array_size) {
      batch->exec_array_size *= 2;
      batch->exec_bos =
         realloc(batch->exec_bos,
                 batch->exec_array_size * sizeof(batch->exec_bos[0]));
      batch->validation_list =
         realloc(batch->validation_list,
                 batch->exec_array_size * sizeof(batch->validation_list[0]));
   }

   batch->validation_list[batch->exec_count] =
      (struct drm_i915_gem_exec_object2) {
         .handle = bo->gem_handle,
         .offset = bo->gtt_offset,
         .flags = bo->kflags | (writable ? EXEC_OBJECT_WRITE : 0),
      };

   bo->index = batch->exec_count;
   batch->exec_bos[batch->exec_count] = bo;
   batch->aperture_space += bo->size;

   batch->exec_count++;
}
static void
create_batch(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   batch->bo = iris_bo_alloc(bufmgr, "command buffer",
                             BATCH_SZ + BATCH_RESERVED, IRIS_MEMZONE_OTHER);
   batch->bo->kflags |= EXEC_OBJECT_CAPTURE;
   batch->map = iris_bo_map(NULL, batch->bo, MAP_READ | MAP_WRITE);
   batch->map_next = batch->map;

   iris_use_pinned_bo(batch, batch->bo, false);
}
static void
iris_batch_reset(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;

   iris_bo_unreference(batch->bo);
   batch->primary_batch_size = 0;
   batch->contains_draw = false;

   create_batch(batch);
   assert(batch->bo->index == 0);

   struct iris_syncpt *syncpt = iris_create_syncpt(screen);
   iris_batch_add_syncpt(batch, syncpt, I915_EXEC_FENCE_SIGNAL);
   iris_syncpt_reference(screen, &syncpt, NULL);

   iris_cache_sets_clear(batch);
}
void
iris_batch_free(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   for (int i = 0; i < batch->exec_count; i++) {
      iris_bo_unreference(batch->exec_bos[i]);
   }
   free(batch->exec_bos);
   free(batch->validation_list);

   ralloc_free(batch->exec_fences.mem_ctx);

   util_dynarray_foreach(&batch->syncpts, struct iris_syncpt *, s)
      iris_syncpt_reference(screen, s, NULL);
   ralloc_free(batch->syncpts.mem_ctx);

   iris_syncpt_reference(screen, &batch->last_syncpt, NULL);

   iris_bo_unreference(batch->bo);
   batch->bo = NULL;
   batch->map = NULL;
   batch->map_next = NULL;

   iris_destroy_hw_context(bufmgr, batch->hw_ctx_id);

   _mesa_hash_table_destroy(batch->cache.render, NULL);
   _mesa_set_destroy(batch->cache.depth, NULL);

   if (unlikely(INTEL_DEBUG))
      gen_batch_decode_ctx_finish(&batch->decoder);
}
/**
 * If we've chained to a secondary batch, or are getting near to the end,
 * then flush. This should only be called between draws.
 */
void
iris_batch_maybe_flush(struct iris_batch *batch, unsigned estimate)
{
   if (batch->bo != batch->exec_bos[0] ||
       iris_batch_bytes_used(batch) + estimate >= BATCH_SZ) {
      iris_batch_flush(batch);
   }
}
void
iris_chain_to_new_batch(struct iris_batch *batch)
{
   /* We only support chaining a single time. */
   assert(batch->bo == batch->exec_bos[0]);

   VG(void *map = batch->map);
   uint32_t *cmd = batch->map_next;
   uint64_t *addr = batch->map_next + 4;
   batch->map_next += 12;

   /* No longer held by batch->bo, still held by validation list */
   iris_bo_unreference(batch->bo);
   batch->primary_batch_size = iris_batch_bytes_used(batch);

   create_batch(batch);

   /* Emit MI_BATCH_BUFFER_START to chain to another batch. */
   *cmd = (0x31 << 23) | (1 << 8) | (3 - 2);
   *addr = batch->bo->gtt_offset;

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(map, batch->primary_batch_size));
}
/**
 * Terminate a batch with MI_BATCH_BUFFER_END.
 */
void
iris_finish_batch(struct iris_batch *batch)
{
   /* Emit MI_BATCH_BUFFER_END to finish our batch. */
   uint32_t *map = batch->map_next;

   map[0] = (0xA << 23);
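   /* 0xA in bits 28:23 is the MI_BATCH_BUFFER_END opcode; the remaining
    * bits are zero (annotation added for clarity).
    */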
   batch->map_next += 4;
   VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->map, iris_batch_bytes_used(batch)));

   if (batch->bo == batch->exec_bos[0])
      batch->primary_batch_size = iris_batch_bytes_used(batch);
}
/**
 * Replace our current GEM context with a new one (in case it got banned).
 */
static bool
replace_hw_ctx(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   uint32_t new_ctx = iris_clone_hw_context(bufmgr, batch->hw_ctx_id);
   if (!new_ctx)
      return false;

   iris_destroy_hw_context(bufmgr, batch->hw_ctx_id);
   batch->hw_ctx_id = new_ctx;

   /* Notify the context that state must be re-initialized. */
   iris_lost_context_state(batch);

   return true;
}
enum pipe_reset_status
iris_batch_check_for_reset(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   enum pipe_reset_status status = PIPE_NO_RESET;
   struct drm_i915_reset_stats stats = { .ctx_id = batch->hw_ctx_id };

   if (drmIoctl(screen->fd, DRM_IOCTL_I915_GET_RESET_STATS, &stats))
      DBG("DRM_IOCTL_I915_GET_RESET_STATS failed: %s\n", strerror(errno));

   if (stats.batch_active != 0) {
      /* A reset was observed while a batch from this hardware context was
       * executing. Assume that this context was at fault.
       */
      status = PIPE_GUILTY_CONTEXT_RESET;
   } else if (stats.batch_pending != 0) {
      /* A reset was observed while a batch from this context was in progress,
       * but the batch was not executing. In this case, assume that the
       * context was not at fault.
       */
      status = PIPE_INNOCENT_CONTEXT_RESET;
   }

   if (status != PIPE_NO_RESET) {
      /* Our context is likely banned, or at least in an unknown state.
       * Throw it away and start with a fresh context. Ideally this may
       * catch the problem before our next execbuf fails with -EIO.
       */
      replace_hw_ctx(batch);
   }

   return status;
}
/**
 * Submit the batch to the GPU via execbuffer2.
 */
static int
submit_batch(struct iris_batch *batch)
{
   iris_bo_unmap(batch->bo);

   /* The requirements for using I915_EXEC_NO_RELOC are:
    *
    *   The addresses written in the objects must match the corresponding
    *   reloc.gtt_offset which in turn must match the corresponding
    *   execobject.offset.
    *
    *   Any render targets written to in the batch must be flagged with
    *   EXEC_OBJECT_WRITE.
    *
    *   To avoid stalling, execobject.offset should match the current
    *   address of that object within the active context.
    */
   struct drm_i915_gem_execbuffer2 execbuf = {
      .buffers_ptr = (uintptr_t) batch->validation_list,
      .buffer_count = batch->exec_count,
      .batch_start_offset = 0,
      /* This must be QWord aligned. */
      .batch_len = ALIGN(batch->primary_batch_size, 8),
      .flags = batch->engine |
               I915_EXEC_NO_RELOC |
               I915_EXEC_BATCH_FIRST |
               I915_EXEC_HANDLE_LUT,
      .rsvd1 = batch->hw_ctx_id, /* rsvd1 is actually the context ID */
   };

   if (num_fences(batch)) {
      execbuf.flags |= I915_EXEC_FENCE_ARRAY;
      execbuf.num_cliprects = num_fences(batch);
      execbuf.cliprects_ptr =
         (uintptr_t)util_dynarray_begin(&batch->exec_fences);
   }

   int ret = 0;
   if (!batch->screen->no_hw &&
       drm_ioctl(batch->screen->fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf))
      ret = -errno;

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];

      bo->idle = false;
      bo->index = -1;

      iris_bo_unreference(bo);
   }

   return ret;
}
static const char *
batch_name_to_string(enum iris_batch_name name)
{
   const char *names[IRIS_BATCH_COUNT] = {
      [IRIS_BATCH_RENDER]  = "render",
      [IRIS_BATCH_COMPUTE] = "compute",
   };
   return names[name];
}
/**
 * Flush the batch buffer, submitting it to the GPU and resetting it so
 * we're ready to emit the next batch.
 */
void
_iris_batch_flush(struct iris_batch *batch, const char *file, int line)
{
   struct iris_screen *screen = batch->screen;

   if (iris_batch_bytes_used(batch) == 0)
      return;

   iris_finish_batch(batch);

   if (unlikely(INTEL_DEBUG &
                (DEBUG_BATCH | DEBUG_SUBMIT | DEBUG_PIPE_CONTROL))) {
      int bytes_for_commands = iris_batch_bytes_used(batch);
      int second_bytes = 0;
      if (batch->bo != batch->exec_bos[0]) {
         second_bytes = bytes_for_commands;
         bytes_for_commands += batch->primary_batch_size;
      }
      fprintf(stderr, "%19s:%-3d: %s batch [%u] flush with %5d+%5db (%0.1f%%) "
              "(cmds), %4d BOs (%0.1fMB aperture)\n",
              file, line, batch_name_to_string(batch->name), batch->hw_ctx_id,
              batch->primary_batch_size, second_bytes,
              100.0f * bytes_for_commands / BATCH_SZ,
              batch->exec_count,
              (float) batch->aperture_space / (1024 * 1024));

      if (INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT)) {
         dump_fence_list(batch);
         dump_validation_list(batch);
      }

      if (INTEL_DEBUG & DEBUG_BATCH) {
         decode_batch(batch);
      }
   }

   int ret = submit_batch(batch);

   batch->exec_count = 0;
   batch->aperture_space = 0;

   struct iris_syncpt *syncpt =
      ((struct iris_syncpt **) util_dynarray_begin(&batch->syncpts))[0];
   iris_syncpt_reference(screen, &batch->last_syncpt, syncpt);

   util_dynarray_foreach(&batch->syncpts, struct iris_syncpt *, s)
      iris_syncpt_reference(screen, s, NULL);
   util_dynarray_clear(&batch->syncpts);

   util_dynarray_clear(&batch->exec_fences);

   if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
      dbg_printf("waiting for idle\n");
      iris_bo_wait_rendering(batch->bo); /* if execbuf failed, this is a no-op */
   }

   /* Start a new batch buffer. */
   iris_batch_reset(batch);

   /* EIO means our context is banned. In this case, try and replace it
    * with a new logical context, and inform iris_context that all state
    * has been lost and needs to be re-initialized. If this succeeds,
    * dubiously claim success...
    */
   if (ret == -EIO && replace_hw_ctx(batch)) {
      if (batch->reset->reset) {
         /* Tell the state tracker the device is lost and it was our fault. */
         batch->reset->reset(batch->reset->data, PIPE_GUILTY_CONTEXT_RESET);
      }

      ret = 0;
   }

   if (ret < 0) {
#ifdef DEBUG
      const bool color = INTEL_DEBUG & DEBUG_COLOR;
      fprintf(stderr, "%siris: Failed to submit batchbuffer: %-80s%s\n",
              color ? "\e[1;41m" : "", strerror(-ret), color ? "\e[0m" : "");
#endif
      abort();
   }
}
/**
 * Does the current batch refer to the given BO?
 *
 * (In other words, is the BO in the current batch's validation list?)
 */
bool
iris_batch_references(struct iris_batch *batch, struct iris_bo *bo)
{
   return find_validation_entry(batch, bo) != NULL;
}