/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * @file iris_batch.c
 *
 * Batchbuffer and command submission module.
 *
 * Every API draw call results in a number of GPU commands, which we
 * collect into a "batch buffer". Typically, many draw calls are grouped
 * into a single batch to amortize command submission overhead.
 *
 * We submit batches to the kernel using the I915_GEM_EXECBUFFER2 ioctl.
 * One critical piece of data is the "validation list", which contains a
 * list of the buffer objects (BOs) which the commands in the GPU need.
 * The kernel will make sure these are resident and pinned at the correct
 * virtual memory address before executing our batch. If a BO is not in
 * the validation list, it effectively does not exist, so take care.
 */
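
/* For orientation (an illustrative sketch, not part of the driver flow):
 * each validation list entry is a struct drm_i915_gem_exec_object2
 * describing one BO.  iris_use_pinned_bo() below fills these in;
 * conceptually each entry looks like:
 *
 *    (struct drm_i915_gem_exec_object2) {
 *       .handle = bo->gem_handle,   // GEM handle the kernel knows the BO by
 *       .offset = bo->gtt_offset,   // softpinned virtual address
 *       .flags  = bo->kflags,       // EXEC_OBJECT_PINNED, etc.
 *    };
 */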
#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"
#include "iris_fence.h"

#include "drm-uapi/i915_drm.h"

#include "util/hash_table.h"
#include "util/set.h"

#include "main/macros.h"

#include <errno.h>

#define FILE_DEBUG_FLAG DEBUG_BUFMGR
/* Terminating the batch takes either 4 bytes for MI_BATCH_BUFFER_END
 * or 12 bytes for MI_BATCH_BUFFER_START (when chaining). Plus, we may
 * need an extra 4 bytes to pad out to the nearest QWord. So reserve 16.
 */
#define BATCH_RESERVED 16
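
/* Worked out from the comment above: the worst case is a 12-byte
 * MI_BATCH_BUFFER_START that needs 4 bytes of QWord padding first,
 * so 12 + 4 = 16 reserved bytes.  MI_BATCH_BUFFER_END needs at most
 * 4 + 4 = 8, which also fits.
 */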
static void
iris_batch_reset(struct iris_batch *batch);
static unsigned
num_fences(struct iris_batch *batch)
{
   return util_dynarray_num_elements(&batch->exec_fences,
                                     struct drm_i915_gem_exec_fence);
}
/**
 * Debugging code to dump the fence list, used by INTEL_DEBUG=submit.
 */
static void
dump_fence_list(struct iris_batch *batch)
{
   fprintf(stderr, "Fence list (length %u): ", num_fences(batch));

   util_dynarray_foreach(&batch->exec_fences,
                         struct drm_i915_gem_exec_fence, f) {
      fprintf(stderr, "%s%u%s ",
              (f->flags & I915_EXEC_FENCE_WAIT) ? "..." : "",
              f->handle,
              (f->flags & I915_EXEC_FENCE_SIGNAL) ? "!" : "");
   }

   fprintf(stderr, "\n");
}
/**
 * Debugging code to dump the validation list, used by INTEL_DEBUG=submit.
 */
static void
dump_validation_list(struct iris_batch *batch)
{
   fprintf(stderr, "Validation list (length %d):\n", batch->exec_count);

   for (int i = 0; i < batch->exec_count; i++) {
      uint64_t flags = batch->validation_list[i].flags;
      assert(batch->validation_list[i].handle ==
             batch->exec_bos[i]->gem_handle);
      fprintf(stderr, "[%2d]: %2d %-14s @ 0x%016llx (%"PRIu64"B)\t %2d refs %s\n",
              i,
              batch->validation_list[i].handle,
              batch->exec_bos[i]->name,
              batch->validation_list[i].offset,
              batch->exec_bos[i]->size,
              batch->exec_bos[i]->refcount,
              (flags & EXEC_OBJECT_WRITE) ? " (write)" : "");
   }
}
/**
 * Return BO information to the batch decoder (for debugging).
 */
static struct gen_batch_decode_bo
decode_get_bo(void *v_batch, bool ppgtt, uint64_t address)
{
   struct iris_batch *batch = v_batch;

   assert(ppgtt);

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];
      /* The decoder zeroes out the top 16 bits, so we need to as well */
      uint64_t bo_address = bo->gtt_offset & (~0ull >> 16);

      if (address >= bo_address && address < bo_address + bo->size) {
         return (struct gen_batch_decode_bo) {
            .addr = address,
            .size = bo->size,
            .map = iris_bo_map(batch->dbg, bo, MAP_READ) +
                   (address - bo_address),
         };
      }
   }

   return (struct gen_batch_decode_bo) { };
}
/**
 * Decode the current batch.
 */
static void
decode_batch(struct iris_batch *batch)
{
   void *map = iris_bo_map(batch->dbg, batch->exec_bos[0], MAP_READ);
   gen_print_batch(&batch->decoder, map, batch->primary_batch_size,
                   batch->exec_bos[0]->gtt_offset, false);
}
void
iris_init_batch(struct iris_batch *batch,
                struct iris_screen *screen,
                struct iris_vtable *vtbl,
                struct pipe_debug_callback *dbg,
                struct iris_batch *all_batches,
                enum iris_batch_name name,
                uint8_t engine,
                int priority)
{
   batch->screen = screen;
   batch->vtbl = vtbl;
   batch->dbg = dbg;
   batch->name = name;

   /* engine should be one of I915_EXEC_RENDER, I915_EXEC_BLT, etc. */
   assert((engine & ~I915_EXEC_RING_MASK) == 0);
   assert(util_bitcount(engine) == 1);
   batch->engine = engine;

   batch->hw_ctx_id = iris_create_hw_context(screen->bufmgr);
   assert(batch->hw_ctx_id);

   iris_hw_context_set_priority(screen->bufmgr, batch->hw_ctx_id, priority);

   util_dynarray_init(&batch->exec_fences, ralloc_context(NULL));
   util_dynarray_init(&batch->syncpts, ralloc_context(NULL));

   batch->exec_count = 0;
   batch->exec_array_size = 100;
   batch->exec_bos =
      malloc(batch->exec_array_size * sizeof(batch->exec_bos[0]));
   batch->validation_list =
      malloc(batch->exec_array_size * sizeof(batch->validation_list[0]));

   batch->cache.render = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                                 _mesa_key_pointer_equal);
   batch->cache.depth = _mesa_set_create(NULL, _mesa_hash_pointer,
                                         _mesa_key_pointer_equal);

   memset(batch->other_batches, 0, sizeof(batch->other_batches));

   for (int i = 0, j = 0; i < IRIS_BATCH_COUNT; i++) {
      if (&all_batches[i] != batch)
         batch->other_batches[j++] = &all_batches[i];
   }

   if (unlikely(INTEL_DEBUG)) {
      const unsigned decode_flags =
         GEN_BATCH_DECODE_FULL |
         ((INTEL_DEBUG & DEBUG_COLOR) ? GEN_BATCH_DECODE_IN_COLOR : 0) |
         GEN_BATCH_DECODE_OFFSETS |
         GEN_BATCH_DECODE_FLOATS;

      /* TODO: track state size so we can print the right # of entries */
      gen_batch_decode_ctx_init(&batch->decoder, &screen->devinfo,
                                stderr, decode_flags, NULL,
                                decode_get_bo, NULL, batch);
      batch->decoder.max_vbo_decoded_lines = 32;
   }

   iris_batch_reset(batch);
}
static struct drm_i915_gem_exec_object2 *
find_validation_entry(struct iris_batch *batch, struct iris_bo *bo)
{
   unsigned index = READ_ONCE(bo->index);

   if (index < batch->exec_count && batch->exec_bos[index] == bo)
      return &batch->validation_list[index];

   /* May have been shared between multiple active batches */
   for (index = 0; index < batch->exec_count; index++) {
      if (batch->exec_bos[index] == bo)
         return &batch->validation_list[index];
   }

   return NULL;
}
/**
 * Add a buffer to the current batch's validation list.
 *
 * You must call this on any BO you wish to use in this batch, to ensure
 * that it's resident when the GPU commands execute.
 */
void
iris_use_pinned_bo(struct iris_batch *batch,
                   struct iris_bo *bo,
                   bool writable)
{
   assert(bo->kflags & EXEC_OBJECT_PINNED);

   /* Never mark the workaround BO with EXEC_OBJECT_WRITE. We don't care
    * about the order of any writes to that buffer, and marking it writable
    * would introduce data dependencies between multiple batches which share
    * it.
    */
   if (bo == batch->screen->workaround_bo)
      writable = false;

   struct drm_i915_gem_exec_object2 *existing_entry =
      find_validation_entry(batch, bo);

   if (existing_entry) {
      /* The BO is already in the validation list; mark it writable */
      if (writable)
         existing_entry->flags |= EXEC_OBJECT_WRITE;

      return;
   }

   if (bo != batch->bo) {
      /* This is the first time our batch has seen this BO. Before we use it,
       * we may need to flush and synchronize with other batches.
       */
      for (int b = 0; b < ARRAY_SIZE(batch->other_batches); b++) {
         struct drm_i915_gem_exec_object2 *other_entry =
            find_validation_entry(batch->other_batches[b], bo);

         /* If the buffer is referenced by another batch, and either batch
          * intends to write it, then flush the other batch and synchronize.
          *
          * Consider these cases:
          *
          * 1. They read, we read   =>  No synchronization required.
          * 2. They read, we write  =>  Synchronize (they need the old value)
          * 3. They write, we read  =>  Synchronize (we need their new value)
          * 4. They write, we write =>  Synchronize (order writes)
          *
          * The read/read case is very common, as multiple batches usually
          * share a streaming state buffer or shader assembly buffer, and
          * we want to avoid synchronizing in this case.
          */
         if (other_entry &&
             ((other_entry->flags & EXEC_OBJECT_WRITE) || writable)) {
            iris_batch_flush(batch->other_batches[b]);
            iris_batch_add_syncpt(batch, batch->other_batches[b]->last_syncpt,
                                  I915_EXEC_FENCE_WAIT);
         }
      }
   }

   /* Now, take a reference and add it to the validation list. */
   iris_bo_reference(bo);

   if (batch->exec_count == batch->exec_array_size) {
      batch->exec_array_size *= 2;
      batch->exec_bos =
         realloc(batch->exec_bos,
                 batch->exec_array_size * sizeof(batch->exec_bos[0]));
      batch->validation_list =
         realloc(batch->validation_list,
                 batch->exec_array_size * sizeof(batch->validation_list[0]));
   }

   batch->validation_list[batch->exec_count] =
      (struct drm_i915_gem_exec_object2) {
         .handle = bo->gem_handle,
         .offset = bo->gtt_offset,
         .flags = bo->kflags | (writable ? EXEC_OBJECT_WRITE : 0),
      };

   bo->index = batch->exec_count;
   batch->exec_bos[batch->exec_count] = bo;
   batch->aperture_space += bo->size;

   batch->exec_count++;
}
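
/* A minimal usage sketch (hypothetical caller; the real call sites live in
 * the state-upload and draw code elsewhere in the driver):
 *
 *    iris_use_pinned_bo(batch, vertex_buffer_bo, false);  // GPU reads only
 *    iris_use_pinned_bo(batch, render_target_bo, true);   // GPU writes
 *
 * Passing writable = true sets EXEC_OBJECT_WRITE, which both triggers the
 * cross-batch synchronization above and tells the kernel the BO's contents
 * will change.
 */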
static void
create_batch(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   batch->bo = iris_bo_alloc(bufmgr, "command buffer",
                             BATCH_SZ + BATCH_RESERVED, IRIS_MEMZONE_OTHER);
   batch->bo->kflags |= EXEC_OBJECT_CAPTURE;
   batch->map = iris_bo_map(NULL, batch->bo, MAP_READ | MAP_WRITE);
   batch->map_next = batch->map;

   iris_use_pinned_bo(batch, batch->bo, false);
}
static void
iris_batch_reset(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;

   iris_bo_unreference(batch->bo);
   batch->primary_batch_size = 0;
   batch->contains_draw = false;

   create_batch(batch);
   assert(batch->bo->index == 0);

   struct iris_syncpt *syncpt = iris_create_syncpt(screen);
   iris_batch_add_syncpt(batch, syncpt, I915_EXEC_FENCE_SIGNAL);
   iris_syncpt_reference(screen, &syncpt, NULL);

   iris_cache_sets_clear(batch);
}
void
iris_batch_free(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   for (int i = 0; i < batch->exec_count; i++) {
      iris_bo_unreference(batch->exec_bos[i]);
   }
   free(batch->exec_bos);
   free(batch->validation_list);

   ralloc_free(batch->exec_fences.mem_ctx);

   util_dynarray_foreach(&batch->syncpts, struct iris_syncpt *, s)
      iris_syncpt_reference(screen, s, NULL);
   ralloc_free(batch->syncpts.mem_ctx);

   iris_syncpt_reference(screen, &batch->last_syncpt, NULL);

   iris_bo_unreference(batch->bo);
   batch->bo = NULL;
   batch->map = NULL;
   batch->map_next = NULL;

   iris_destroy_hw_context(bufmgr, batch->hw_ctx_id);

   _mesa_hash_table_destroy(batch->cache.render, NULL);
   _mesa_set_destroy(batch->cache.depth, NULL);

   if (unlikely(INTEL_DEBUG))
      gen_batch_decode_ctx_finish(&batch->decoder);
}
/**
 * If we've chained to a secondary batch, or are getting near to the end,
 * then flush. This should only be called between draws.
 */
void
iris_batch_maybe_flush(struct iris_batch *batch, unsigned estimate)
{
   if (batch->bo != batch->exec_bos[0] ||
       iris_batch_bytes_used(batch) + estimate >= BATCH_SZ) {
      iris_batch_flush(batch);
   }
}
void
iris_chain_to_new_batch(struct iris_batch *batch)
{
   /* We only support chaining a single time. */
   assert(batch->bo == batch->exec_bos[0]);

   VG(void *map = batch->map);
   uint32_t *cmd = batch->map_next;
   uint64_t *addr = batch->map_next + 4;
   batch->map_next += 12;

   /* No longer held by batch->bo, still held by validation list */
   iris_bo_unreference(batch->bo);
   batch->primary_batch_size = iris_batch_bytes_used(batch);
   create_batch(batch);

   /* Emit MI_BATCH_BUFFER_START to chain to another batch:
    * opcode 0x31 in bits 28:23, the PPGTT address space indicator in
    * bit 8, and a DWord length of 3 - 2 = 1 (three DWords total).
    */
   *cmd = (0x31 << 23) | (1 << 8) | (3 - 2);
   *addr = batch->bo->gtt_offset;

   VG(VALGRIND_CHECK_MEM_IS_DEFINED(map, batch->primary_batch_size));
}
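
/* After chaining, the command stream is laid out like this (sketch):
 *
 *    old batch->bo: [ commands ... | MI_BATCH_BUFFER_START ]---+
 *                                                              |
 *    new batch->bo: [ more commands ... ]  <-------------------+
 *
 * Only the primary buffer's size is recorded in primary_batch_size; the
 * secondary buffer's usage is measured separately at flush time.
 */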
/**
 * Terminate a batch with MI_BATCH_BUFFER_END.
 */
static void
iris_finish_batch(struct iris_batch *batch)
{
   /* Emit MI_BATCH_BUFFER_END to finish our batch. */
   uint32_t *map = batch->map_next;
   map[0] = (0xA << 23);

   batch->map_next += 4;
   VG(VALGRIND_CHECK_MEM_IS_DEFINED(batch->map, iris_batch_bytes_used(batch)));

   if (batch->bo == batch->exec_bos[0])
      batch->primary_batch_size = iris_batch_bytes_used(batch);
}
/**
 * Submit the batch to the GPU via execbuffer2.
 */
static int
submit_batch(struct iris_batch *batch)
{
   iris_bo_unmap(batch->bo);

   /* The requirements for using I915_EXEC_NO_RELOC are:
    *
    *   The addresses written in the objects must match the corresponding
    *   reloc.gtt_offset which in turn must match the corresponding
    *   execobject.offset.
    *
    *   Any render targets written to in the batch must be flagged with
    *   EXEC_OBJECT_WRITE.
    *
    *   To avoid stalling, execobject.offset should match the current
    *   address of that object within the active context.
    */
   struct drm_i915_gem_execbuffer2 execbuf = {
      .buffers_ptr = (uintptr_t) batch->validation_list,
      .buffer_count = batch->exec_count,
      .batch_start_offset = 0,
      /* This must be QWord aligned. */
      .batch_len = ALIGN(batch->primary_batch_size, 8),
      .flags = batch->engine |
               I915_EXEC_NO_RELOC |
               I915_EXEC_BATCH_FIRST |
               I915_EXEC_HANDLE_LUT,
      .rsvd1 = batch->hw_ctx_id, /* rsvd1 is actually the context ID */
   };

   if (num_fences(batch)) {
      execbuf.flags |= I915_EXEC_FENCE_ARRAY;
      execbuf.num_cliprects = num_fences(batch);
      execbuf.cliprects_ptr =
         (uintptr_t)util_dynarray_begin(&batch->exec_fences);
   }

   int ret = drm_ioctl(batch->screen->fd,
                       DRM_IOCTL_I915_GEM_EXECBUFFER2,
                       &execbuf);
   if (ret != 0) {
      ret = -errno;
      DBG("execbuf FAILED: errno = %d\n", -ret);
      fprintf(stderr, "execbuf FAILED: errno = %d\n", -ret);
   } else {
      DBG("execbuf succeeded\n");
   }

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];

      bo->idle = false;
      bo->index = -1;

      iris_bo_unreference(bo);
   }

   return ret;
}
static const char *
batch_name_to_string(enum iris_batch_name name)
{
   const char *names[IRIS_BATCH_COUNT] = {
      [IRIS_BATCH_RENDER]  = "render",
      [IRIS_BATCH_COMPUTE] = "compute",
   };
   return names[name];
}
/**
 * Flush the batch buffer, submitting it to the GPU and resetting it so
 * we're ready to emit the next batch.
 */
void
_iris_batch_flush(struct iris_batch *batch, const char *file, int line)
{
   struct iris_screen *screen = batch->screen;

   if (iris_batch_bytes_used(batch) == 0)
      return;

   iris_finish_batch(batch);

   if (unlikely(INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT))) {
      int bytes_for_commands = iris_batch_bytes_used(batch);
      int second_bytes = 0;
      if (batch->bo != batch->exec_bos[0]) {
         second_bytes = bytes_for_commands;
         bytes_for_commands += batch->primary_batch_size;
      }
      fprintf(stderr, "%19s:%-3d: %s batch [%u] flush with %5d+%5db (%0.1f%%) "
              "(cmds), %4d BOs (%0.1fMb aperture)\n",
              file, line, batch_name_to_string(batch->name), batch->hw_ctx_id,
              batch->primary_batch_size, second_bytes,
              100.0f * bytes_for_commands / BATCH_SZ,
              batch->exec_count,
              (float) batch->aperture_space / (1024 * 1024));
      dump_fence_list(batch);
      dump_validation_list(batch);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
      decode_batch(batch);
   }

   int ret = submit_batch(batch);

   if (ret >= 0) {
      //if (iris->ctx.Const.ResetStrategy == GL_LOSE_CONTEXT_ON_RESET_ARB)
      //   iris_check_for_reset(ice);

      if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
         dbg_printf("waiting for idle\n");
         iris_bo_wait_rendering(batch->bo);
      }
   } else {
      const bool color = INTEL_DEBUG & DEBUG_COLOR;
      fprintf(stderr, "%siris: Failed to submit batchbuffer: %-80s%s\n",
              color ? "\e[1;41m" : "", strerror(-ret), color ? "\e[0m" : "");
      abort();
   }

   batch->exec_count = 0;
   batch->aperture_space = 0;

   struct iris_syncpt *syncpt =
      ((struct iris_syncpt **) util_dynarray_begin(&batch->syncpts))[0];
   iris_syncpt_reference(screen, &batch->last_syncpt, syncpt);

   util_dynarray_foreach(&batch->syncpts, struct iris_syncpt *, s)
      iris_syncpt_reference(screen, s, NULL);
   util_dynarray_clear(&batch->syncpts);

   util_dynarray_clear(&batch->exec_fences);

   /* Start a new batch buffer. */
   iris_batch_reset(batch);
}
/**
 * Does the current batch refer to the given BO?
 *
 * (In other words, is the BO in the current batch's validation list?)
 */
*batch
, struct iris_bo
*bo
)
614 return find_validation_entry(batch
, bo
) != NULL
;