/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_batch.c
 *
 * Batchbuffer and command submission module.
 *
 * Every API draw call results in a number of GPU commands, which we
 * collect into a "batch buffer".  Typically, many draw calls are grouped
 * into a single batch to amortize command submission overhead.
 *
 * We submit batches to the kernel using the I915_GEM_EXECBUFFER2 ioctl.
 * One critical piece of data is the "validation list", which contains a
 * list of the buffer objects (BOs) which the commands in the batch need.
 * The kernel will make sure these are resident and pinned at the correct
 * virtual memory address before executing our batch.  If a BO is not in
 * the validation list, it effectively does not exist, so take care.
 */

#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"
#include "iris_fence.h"

#include "drm-uapi/i915_drm.h"

#include "util/hash_table.h"
#include "util/set.h"
#include "main/macros.h"

#include <errno.h>
#include <xf86drm.h>

#define FILE_DEBUG_FLAG DEBUG_BUFMGR

/* Terminating the batch takes either 4 bytes for MI_BATCH_BUFFER_END
 * or 12 bytes for MI_BATCH_BUFFER_START (when chaining).  Plus, we may
 * need an extra 4 bytes to pad out to the nearest QWord.  So reserve 16.
 */
#define BATCH_RESERVED 16

static void iris_batch_reset(struct iris_batch *batch);
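
/* Number of fences to be passed to execbuf for this batch's submission. */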
static unsigned
num_fences(struct iris_batch *batch)
{
   return util_dynarray_num_elements(&batch->exec_fences,
                                     struct drm_i915_gem_exec_fence);
}

/**
 * Debugging code to dump the fence list, used by INTEL_DEBUG=submit.
 */
static void
dump_fence_list(struct iris_batch *batch)
{
   fprintf(stderr, "Fence list (length %u): ", num_fences(batch));

   util_dynarray_foreach(&batch->exec_fences,
                         struct drm_i915_gem_exec_fence, f) {
      fprintf(stderr, "%s%u%s ",
              (f->flags & I915_EXEC_FENCE_WAIT) ? "..." : "",
              f->handle,
              (f->flags & I915_EXEC_FENCE_SIGNAL) ? "!" : "");
   }

   fprintf(stderr, "\n");
}

/**
 * Debugging code to dump the validation list, used by INTEL_DEBUG=submit.
 */
static void
dump_validation_list(struct iris_batch *batch)
{
   fprintf(stderr, "Validation list (length %d):\n", batch->exec_count);

   for (int i = 0; i < batch->exec_count; i++) {
      uint64_t flags = batch->validation_list[i].flags;
      assert(batch->validation_list[i].handle ==
             batch->exec_bos[i]->gem_handle);
      fprintf(stderr, "[%2d]: %2d %-14s @ 0x%016llx (%"PRIu64"B)\t %2d refs %s\n",
              i,
              batch->validation_list[i].handle,
              batch->exec_bos[i]->name,
              batch->validation_list[i].offset,
              batch->exec_bos[i]->size,
              batch->exec_bos[i]->refcount,
              (flags & EXEC_OBJECT_WRITE) ? " (write)" : "");
   }
}

/**
 * Return BO information to the batch decoder (for debugging).
 */
static struct gen_batch_decode_bo
decode_get_bo(void *v_batch, uint64_t address)
{
   struct iris_batch *batch = v_batch;

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];
      /* The decoder zeroes out the top 16 bits, so we need to as well */
      uint64_t bo_address = bo->gtt_offset & (~0ull >> 16);

      if (address >= bo_address && address < bo_address + bo->size) {
         return (struct gen_batch_decode_bo) {
            .addr = address,
            .size = bo->size,
            .map = iris_bo_map(batch->dbg, bo, MAP_READ) +
                   (address - bo_address),
         };
      }
   }

   return (struct gen_batch_decode_bo) { };
}

/**
 * Decode the current batch.
 */
static void
decode_batch(struct iris_batch *batch)
{
   void *map = iris_bo_map(batch->dbg, batch->exec_bos[0], MAP_READ);
   gen_print_batch(&batch->decoder, map, batch->primary_batch_size,
                   batch->exec_bos[0]->gtt_offset);
}
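
/**
 * Initialize an iris_batch: record the engine and debug state, create a
 * hardware context, allocate the validation list storage, and reset the
 * batch so it is ready to receive commands.
 */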
void
iris_init_batch(struct iris_batch *batch,
                struct iris_screen *screen,
                struct iris_vtable *vtbl,
                struct pipe_debug_callback *dbg,
                struct iris_batch *all_batches,
                enum iris_batch_name name,
                uint8_t engine)
{
   batch->screen = screen;
   batch->vtbl = vtbl;
   batch->dbg = dbg;
   batch->name = name;

   /* engine should be one of I915_EXEC_RENDER, I915_EXEC_BLT, etc. */
   assert((engine & ~I915_EXEC_RING_MASK) == 0);
   assert(util_bitcount(engine) == 1);
   batch->engine = engine;

   batch->hw_ctx_id = iris_create_hw_context(screen->bufmgr);
   assert(batch->hw_ctx_id);

   util_dynarray_init(&batch->exec_fences, ralloc_context(NULL));
   util_dynarray_init(&batch->syncpts, ralloc_context(NULL));

   batch->exec_count = 0;
   batch->exec_array_size = 100;
   batch->exec_bos =
      malloc(batch->exec_array_size * sizeof(batch->exec_bos[0]));
   batch->validation_list =
      malloc(batch->exec_array_size * sizeof(batch->validation_list[0]));

   batch->cache.render = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                                 _mesa_key_pointer_equal);
   batch->cache.depth = _mesa_set_create(NULL, _mesa_hash_pointer,
                                         _mesa_key_pointer_equal);

   memset(batch->other_batches, 0, sizeof(batch->other_batches));

   for (int i = 0, j = 0; i < IRIS_BATCH_COUNT; i++) {
      if (&all_batches[i] != batch)
         batch->other_batches[j++] = &all_batches[i];
   }

   if (unlikely(INTEL_DEBUG)) {
      const unsigned decode_flags =
         GEN_BATCH_DECODE_FULL |
         ((INTEL_DEBUG & DEBUG_COLOR) ? GEN_BATCH_DECODE_IN_COLOR : 0) |
         GEN_BATCH_DECODE_OFFSETS |
         GEN_BATCH_DECODE_FLOATS;

      /* TODO: track state size so we can print the right # of entries */
      gen_batch_decode_ctx_init(&batch->decoder, &screen->devinfo,
                                stderr, decode_flags, NULL,
                                decode_get_bo, NULL, batch);
      batch->decoder.max_vbo_decoded_lines = 32;
   }

   iris_batch_reset(batch);
}
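
/**
 * Return the validation list entry for the given BO if it is already in
 * this batch's list, or NULL if it is not.  bo->index caches the last
 * known position, so check it before falling back to a linear scan.
 */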
static struct drm_i915_gem_exec_object2 *
find_validation_entry(struct iris_batch *batch, struct iris_bo *bo)
{
   unsigned index = READ_ONCE(bo->index);

   if (index < batch->exec_count && batch->exec_bos[index] == bo)
      return &batch->validation_list[index];

   /* May have been shared between multiple active batches */
   for (index = 0; index < batch->exec_count; index++) {
      if (batch->exec_bos[index] == bo)
         return &batch->validation_list[index];
   }

   return NULL;
}

/**
 * Add a buffer to the current batch's validation list.
 *
 * You must call this on any BO you wish to use in this batch, to ensure
 * that it's resident when the GPU commands execute.
 */
void
iris_use_pinned_bo(struct iris_batch *batch,
                   struct iris_bo *bo,
                   bool writable)
{
   assert(bo->kflags & EXEC_OBJECT_PINNED);

   /* Never mark the workaround BO with EXEC_OBJECT_WRITE.  We don't care
    * about the order of any writes to that buffer, and marking it writable
    * would introduce data dependencies between multiple batches which share
    * the buffer.
    */
   if (bo == batch->screen->workaround_bo)
      writable = false;

   struct drm_i915_gem_exec_object2 *existing_entry =
      find_validation_entry(batch, bo);

   if (existing_entry) {
      /* The BO is already in the validation list; mark it writable */
      if (writable)
         existing_entry->flags |= EXEC_OBJECT_WRITE;

      return;
   }

   if (bo != batch->bo) {
      /* This is the first time our batch has seen this BO.  Before we use it,
       * we may need to flush and synchronize with other batches.
       */
      for (int b = 0; b < ARRAY_SIZE(batch->other_batches); b++) {
         struct drm_i915_gem_exec_object2 *other_entry =
            find_validation_entry(batch->other_batches[b], bo);

         /* If the buffer is referenced by another batch, and either batch
          * intends to write it, then flush the other batch and synchronize.
          *
          * Consider these cases:
          *
          * 1. They read, we read   =>  No synchronization required.
          * 2. They read, we write  =>  Synchronize (they need the old value)
          * 3. They write, we read  =>  Synchronize (we need their new value)
          * 4. They write, we write =>  Synchronize (order writes)
          *
          * The read/read case is very common, as multiple batches usually
          * share a streaming state buffer or shader assembly buffer, and
          * we want to avoid synchronizing in this case.
          */
         if (other_entry &&
             ((other_entry->flags & EXEC_OBJECT_WRITE) || writable)) {
            iris_batch_flush(batch->other_batches[b]);
            iris_batch_add_syncpt(batch, batch->other_batches[b]->last_syncpt,
                                  I915_EXEC_FENCE_WAIT);
         }
      }
   }

   /* Now, take a reference and add it to the validation list. */
   iris_bo_reference(bo);

   if (batch->exec_count == batch->exec_array_size) {
      batch->exec_array_size *= 2;
      batch->exec_bos =
         realloc(batch->exec_bos,
                 batch->exec_array_size * sizeof(batch->exec_bos[0]));
      batch->validation_list =
         realloc(batch->validation_list,
                 batch->exec_array_size * sizeof(batch->validation_list[0]));
   }

   batch->validation_list[batch->exec_count] =
      (struct drm_i915_gem_exec_object2) {
         .handle = bo->gem_handle,
         .offset = bo->gtt_offset,
         .flags = bo->kflags | (writable ? EXEC_OBJECT_WRITE : 0),
      };

   bo->index = batch->exec_count;
   batch->exec_bos[batch->exec_count] = bo;
   batch->aperture_space += bo->size;

   batch->exec_count++;
}
static void
create_batch(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   batch->bo = iris_bo_alloc(bufmgr, "command buffer",
                             BATCH_SZ + BATCH_RESERVED, IRIS_MEMZONE_OTHER);
   batch->bo->kflags |= EXEC_OBJECT_CAPTURE;
   batch->map = iris_bo_map(NULL, batch->bo, MAP_READ | MAP_WRITE);
   batch->map_next = batch->map;

   iris_use_pinned_bo(batch, batch->bo, false);
}
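
/**
 * Reset the batch to an empty state: release the old command buffer,
 * allocate a fresh one, and attach a new sync point that will be
 * signalled when the next submission completes.
 */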
static void
iris_batch_reset(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;

   iris_bo_unreference(batch->bo);
   batch->primary_batch_size = 0;
   batch->contains_draw = false;

   create_batch(batch);
   assert(batch->bo->index == 0);

   struct iris_syncpt *syncpt = iris_create_syncpt(screen);
   iris_batch_add_syncpt(batch, syncpt, I915_EXEC_FENCE_SIGNAL);
   iris_syncpt_reference(screen, &syncpt, NULL);

   iris_cache_sets_clear(batch);
}
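
/**
 * Free all resources owned by the batch: validation list entries, sync
 * points, the command buffer BO, the hardware context, and the cache
 * tracking sets.
 */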
void
iris_batch_free(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   for (int i = 0; i < batch->exec_count; i++) {
      iris_bo_unreference(batch->exec_bos[i]);
   }
   free(batch->exec_bos);
   free(batch->validation_list);

   ralloc_free(batch->exec_fences.mem_ctx);

   util_dynarray_foreach(&batch->syncpts, struct iris_syncpt *, s)
      iris_syncpt_reference(screen, s, NULL);
   ralloc_free(batch->syncpts.mem_ctx);

   iris_syncpt_reference(screen, &batch->last_syncpt, NULL);

   iris_bo_unreference(batch->bo);
   batch->bo = NULL;
   batch->map = NULL;
   batch->map_next = NULL;

   iris_destroy_hw_context(bufmgr, batch->hw_ctx_id);

   _mesa_hash_table_destroy(batch->cache.render, NULL);
   _mesa_set_destroy(batch->cache.depth, NULL);

   if (unlikely(INTEL_DEBUG))
      gen_batch_decode_ctx_finish(&batch->decoder);
}

/**
 * If we've chained to a secondary batch, or are getting near to the end,
 * then flush.  This should only be called between draws.
 */
void
iris_batch_maybe_flush(struct iris_batch *batch, unsigned estimate)
{
   if (batch->bo != batch->exec_bos[0] ||
       iris_batch_bytes_used(batch) + estimate >= BATCH_SZ) {
      iris_batch_flush(batch);
   }
}
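
/**
 * Chain to a new batch buffer: record where MI_BATCH_BUFFER_START will go,
 * allocate a fresh command buffer, and point the chain command at it.
 */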
void
iris_chain_to_new_batch(struct iris_batch *batch)
{
   /* We only support chaining a single time. */
   assert(batch->bo == batch->exec_bos[0]);

   uint32_t *cmd = batch->map_next;
   uint64_t *addr = batch->map_next + 4;
   batch->map_next += 8;

   /* No longer held by batch->bo, still held by validation list */
   iris_bo_unreference(batch->bo);
   batch->primary_batch_size = iris_batch_bytes_used(batch);

   create_batch(batch);

   /* Emit MI_BATCH_BUFFER_START to chain to another batch. */
   *cmd = (0x31 << 23) | (1 << 8) | (3 - 2);
   *addr = batch->bo->gtt_offset;
}

/**
 * Terminate a batch with MI_BATCH_BUFFER_END.
 */
void
iris_finish_batch(struct iris_batch *batch)
{
   /* Emit MI_BATCH_BUFFER_END to finish our batch. */
   uint32_t *map = batch->map_next;

   map[0] = (0xA << 23);

   batch->map_next += 4;

   if (batch->bo == batch->exec_bos[0])
      batch->primary_batch_size = iris_batch_bytes_used(batch);
}

/**
 * Submit the batch to the GPU via execbuffer2.
 */
static int
submit_batch(struct iris_batch *batch)
{
   iris_bo_unmap(batch->bo);

   /* The requirements for using I915_EXEC_NO_RELOC are:
    *
    *   The addresses written in the objects must match the corresponding
    *   reloc.presumed_offset which in turn must match the corresponding
    *   execobject.offset.
    *
    *   Any render targets written to in the batch must be flagged with
    *   EXEC_OBJECT_WRITE.
    *
    *   To avoid stalling, execobject.offset should match the current
    *   address of that object within the active context.
    */
   struct drm_i915_gem_execbuffer2 execbuf = {
      .buffers_ptr = (uintptr_t) batch->validation_list,
      .buffer_count = batch->exec_count,
      .batch_start_offset = 0,
      /* This must be QWord aligned. */
      .batch_len = ALIGN(batch->primary_batch_size, 8),
      .flags = batch->engine |
               I915_EXEC_NO_RELOC |
               I915_EXEC_BATCH_FIRST |
               I915_EXEC_HANDLE_LUT,
      .rsvd1 = batch->hw_ctx_id, /* rsvd1 is actually the context ID */
   };
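
   /* With I915_EXEC_FENCE_ARRAY, the kernel reuses the otherwise unused
    * cliprects_ptr/num_cliprects fields to pass the array of fences to
    * wait on and signal.
    */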
   if (num_fences(batch)) {
      execbuf.flags |= I915_EXEC_FENCE_ARRAY;
      execbuf.num_cliprects = num_fences(batch);
      execbuf.cliprects_ptr =
         (uintptr_t)util_dynarray_begin(&batch->exec_fences);
   }

   int ret = drm_ioctl(batch->screen->fd,
                       DRM_IOCTL_I915_GEM_EXECBUFFER2,
                       &execbuf);
   if (ret != 0) {
      ret = -errno;
      DBG("execbuf FAILED: errno = %d\n", -ret);
      fprintf(stderr, "execbuf FAILED: errno = %d\n", -ret);
   } else {
      DBG("execbuf succeeded\n");
   }

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];

      bo->idle = false;
      bo->index = -1;

      iris_bo_unreference(bo);
   }

   return ret;
}
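
/* Returns a short, human-readable name for the batch, for debug output. */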
static const char *
batch_name_to_string(enum iris_batch_name name)
{
   const char *names[IRIS_BATCH_COUNT] = {
      [IRIS_BATCH_RENDER]  = "render",
      [IRIS_BATCH_COMPUTE] = "compute",
   };
   return names[name];
}

/**
 * Flush the batch buffer, submitting it to the GPU and resetting it so
 * we're ready to emit the next batch.
 */
void
_iris_batch_flush(struct iris_batch *batch, const char *file, int line)
{
   struct iris_screen *screen = batch->screen;

   if (iris_batch_bytes_used(batch) == 0)
      return;

   iris_finish_batch(batch);

   if (unlikely(INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT))) {
      int bytes_for_commands = iris_batch_bytes_used(batch);
      int second_bytes = 0;
      if (batch->bo != batch->exec_bos[0]) {
         second_bytes = bytes_for_commands;
         bytes_for_commands += batch->primary_batch_size;
      }
      fprintf(stderr, "%19s:%-3d: %s batch [%u] flush with %5d+%5db (%0.1f%%) "
              "(cmds), %4d BOs (%0.1fMb aperture)\n",
              file, line, batch_name_to_string(batch->name), batch->hw_ctx_id,
              batch->primary_batch_size, second_bytes,
              100.0f * bytes_for_commands / BATCH_SZ,
              batch->exec_count,
              (float) batch->aperture_space / (1024 * 1024));
      dump_fence_list(batch);
      dump_validation_list(batch);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
      decode_batch(batch);
   }

   int ret = submit_batch(batch);

   if (ret >= 0) {
      //if (iris->ctx.Const.ResetStrategy == GL_LOSE_CONTEXT_ON_RESET_ARB)
         //iris_check_for_reset(ice);

      if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
         dbg_printf("waiting for idle\n");
         iris_bo_wait_rendering(batch->bo);
      }
   } else {
      const bool color = INTEL_DEBUG & DEBUG_COLOR;
      fprintf(stderr, "%siris: Failed to submit batchbuffer: %-80s%s\n",
              color ? "\e[1;41m" : "", strerror(-ret), color ? "\e[0m" : "");
   }

   batch->exec_count = 0;
   batch->aperture_space = 0;

   struct iris_syncpt *syncpt =
      ((struct iris_syncpt **) util_dynarray_begin(&batch->syncpts))[0];
   iris_syncpt_reference(screen, &batch->last_syncpt, syncpt);

   util_dynarray_foreach(&batch->syncpts, struct iris_syncpt *, s)
      iris_syncpt_reference(screen, s, NULL);
   util_dynarray_clear(&batch->syncpts);

   util_dynarray_clear(&batch->exec_fences);

   /* Start a new batch buffer. */
   iris_batch_reset(batch);
}

/**
 * Does the current batch refer to the given BO?
 *
 * (In other words, is the BO in the current batch's validation list?)
 */
bool
iris_batch_references(struct iris_batch *batch, struct iris_bo *bo)
{
   return find_validation_entry(batch, bo) != NULL;
}