/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */
/**
 * Batchbuffer and command submission module.
 *
 * Every API draw call results in a number of GPU commands, which we
 * collect into a "batch buffer".  Typically, many draw calls are grouped
 * into a single batch to amortize command submission overhead.
 *
 * We submit batches to the kernel using the I915_GEM_EXECBUFFER2 ioctl.
 * One critical piece of data is the "validation list", which contains a
 * list of the buffer objects (BOs) which the commands in the batch need.
 * The kernel will make sure these are resident and pinned at the correct
 * virtual memory address before executing our batch.  If a BO is not in
 * the validation list, it effectively does not exist, so take care.
 */
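/*
 * A rough usage sketch of how the rest of the driver drives this module
 * (the real call sites live elsewhere; the emission step is illustrative):
 *
 *    iris_batch_maybe_flush(batch, estimate);   // make room for the draw
 *    iris_use_pinned_bo(batch, bo, writable);   // for every BO referenced
 *    ...write commands at batch->map_next...    // then advance map_next
 *    iris_batch_flush(batch);                   // submit via execbuffer2
 */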
#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"

#include "drm-uapi/i915_drm.h"

#include "util/hash_table.h"
#include "util/set.h"

#include "main/macros.h"

#include <errno.h>

#define FILE_DEBUG_FLAG DEBUG_BUFMGR
/* Terminating the batch takes either 4 bytes for MI_BATCH_BUFFER_END
 * or 12 bytes for MI_BATCH_BUFFER_START (when chaining).  Plus, we may
 * need an extra 4 bytes to pad out to the nearest QWord.  So reserve 16.
 */
#define BATCH_RESERVED 16
static void iris_batch_reset(struct iris_batch *batch);
/**
 * Debugging code to dump the validation list, used by INTEL_DEBUG=submit.
 */
static void
dump_validation_list(struct iris_batch *batch)
{
   fprintf(stderr, "Validation list (length %d):\n", batch->exec_count);

   for (int i = 0; i < batch->exec_count; i++) {
      uint64_t flags = batch->validation_list[i].flags;
      assert(batch->validation_list[i].handle ==
             batch->exec_bos[i]->gem_handle);
      fprintf(stderr, "[%2d]: %2d %-14s %p %-7s @ 0x%016llx (%"PRIu64"B) - %d refs\n",
              i,
              batch->validation_list[i].handle,
              batch->exec_bos[i]->name,
              batch->exec_bos[i],
              (flags & EXEC_OBJECT_WRITE) ? "(write)" : "",
              (unsigned long long) batch->validation_list[i].offset,
              batch->exec_bos[i]->size,
              batch->exec_bos[i]->refcount);
   }
}
/**
 * Return BO information to the batch decoder (for debugging).
 */
static struct gen_batch_decode_bo
decode_get_bo(void *v_batch, uint64_t address)
{
   struct iris_batch *batch = v_batch;

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];
      /* The decoder zeroes out the top 16 bits, so we need to as well */
      uint64_t bo_address = bo->gtt_offset & (~0ull >> 16);

      if (address >= bo_address && address < bo_address + bo->size) {
         return (struct gen_batch_decode_bo) {
            .addr = address,
            .size = bo->size,
            .map = iris_bo_map(batch->dbg, bo, MAP_READ) +
                   (address - bo_address),
         };
      }
   }

   return (struct gen_batch_decode_bo) { };
}
/**
 * Decode the current batch.
 */
static void
decode_batch(struct iris_batch *batch)
{
   void *map = iris_bo_map(batch->dbg, batch->exec_bos[0], MAP_READ);
   gen_print_batch(&batch->decoder, map, batch->primary_batch_size,
                   batch->exec_bos[0]->gtt_offset);
}
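/* Hash/compare callbacks for tables whose keys are small integers stored
 * directly in the pointer value (used for the state_sizes table created
 * in iris_init_batch() below).
 */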
static bool
uint_key_compare(const void *a, const void *b)
{
   return a == b;
}

static uint32_t
uint_key_hash(const void *key)
{
   return (uintptr_t) key;
}
void
iris_init_batch(struct iris_batch *batch,
                struct iris_screen *screen,
                struct iris_vtable *vtbl,
                struct pipe_debug_callback *dbg,
                struct iris_batch **all_batches,
                const char *name,
                uint8_t engine)
{
   batch->screen = screen;
   batch->vtbl = vtbl;
   batch->dbg = dbg;
   batch->name = name;

   /* engine should be one of I915_EXEC_RENDER, I915_EXEC_BLT, etc. */
   assert((engine & ~I915_EXEC_RING_MASK) == 0);
   assert(util_bitcount(engine) == 1);
   batch->engine = engine;

   batch->hw_ctx_id = iris_create_hw_context(screen->bufmgr);
   assert(batch->hw_ctx_id);

   batch->exec_count = 0;
   batch->exec_array_size = 100;
   batch->exec_bos =
      malloc(batch->exec_array_size * sizeof(batch->exec_bos[0]));
   batch->validation_list =
      malloc(batch->exec_array_size * sizeof(batch->validation_list[0]));

   batch->cache.render = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                                 _mesa_key_pointer_equal);
   batch->cache.depth = _mesa_set_create(NULL, _mesa_hash_pointer,
                                         _mesa_key_pointer_equal);

   memset(batch->other_batches, 0, sizeof(batch->other_batches));

   for (int i = 0, j = 0; i < IRIS_BATCH_COUNT; i++) {
      if (all_batches[i] != batch)
         batch->other_batches[j++] = all_batches[i];
   }

   if (unlikely(INTEL_DEBUG)) {
      batch->state_sizes =
         _mesa_hash_table_create(NULL, uint_key_hash, uint_key_compare);

      const unsigned decode_flags =
         GEN_BATCH_DECODE_FULL |
         ((INTEL_DEBUG & DEBUG_COLOR) ? GEN_BATCH_DECODE_IN_COLOR : 0) |
         GEN_BATCH_DECODE_OFFSETS |
         GEN_BATCH_DECODE_FLOATS;

      gen_batch_decode_ctx_init(&batch->decoder, &screen->devinfo,
                                stderr, decode_flags, NULL,
                                decode_get_bo, NULL, batch);
      batch->decoder.max_vbo_decoded_lines = 32;
   }

   iris_batch_reset(batch);
}
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
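/**
 * Find the validation-list entry for a BO, or return NULL if this batch
 * doesn't reference it.  bo->index caches the BO's last known slot in
 * some batch's validation list, so we try that slot first and fall back
 * to a linear search, since a BO may be referenced by several batches.
 */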
static struct drm_i915_gem_exec_object2 *
find_validation_entry(struct iris_batch *batch, struct iris_bo *bo)
{
   unsigned index = READ_ONCE(bo->index);

   if (index < batch->exec_count && batch->exec_bos[index] == bo)
      return &batch->validation_list[index];

   /* May have been shared between multiple active batches */
   for (index = 0; index < batch->exec_count; index++) {
      if (batch->exec_bos[index] == bo)
         return &batch->validation_list[index];
   }

   return NULL;
}
/**
 * Add a buffer to the current batch's validation list.
 *
 * You must call this on any BO you wish to use in this batch, to ensure
 * that it's resident when the GPU commands execute.
 */
void
iris_use_pinned_bo(struct iris_batch *batch,
                   struct iris_bo *bo,
                   bool writable)
{
   assert(bo->kflags & EXEC_OBJECT_PINNED);

   /* Never mark the workaround BO with EXEC_OBJECT_WRITE.  We don't care
    * about the order of any writes to that buffer, and marking it writable
    * would introduce data dependencies between multiple batches which share
    * the buffer.
    */
   if (bo == batch->screen->workaround_bo)
      writable = false;

   struct drm_i915_gem_exec_object2 *existing_entry =
      find_validation_entry(batch, bo);

   if (existing_entry) {
      /* The BO is already in the validation list; mark it writable */
      if (writable)
         existing_entry->flags |= EXEC_OBJECT_WRITE;

      return;
   }

   /* This is the first time our batch has seen this BO.  Before we use it,
    * we may need to flush and synchronize with other batches.
    */
   for (int b = 0; b < ARRAY_SIZE(batch->other_batches); b++) {
      struct drm_i915_gem_exec_object2 *other_entry =
         find_validation_entry(batch->other_batches[b], bo);

      /* If the buffer is referenced by another batch, and either batch
       * intends to write it, then flush the other batch and synchronize.
       *
       * Consider these cases:
       *
       * 1. They read, we read   =>  No synchronization required.
       * 2. They read, we write  =>  Synchronize (they need the old value)
       * 3. They write, we read  =>  Synchronize (we need their new value)
       * 4. They write, we write =>  Synchronize (order writes)
       *
       * The read/read case is very common, as multiple batches usually
       * share a streaming state buffer or shader assembly buffer, and
       * we want to avoid synchronizing in this case.
       */
      if (other_entry &&
          ((other_entry->flags & EXEC_OBJECT_WRITE) || writable)) {
         iris_batch_flush(batch->other_batches[b]);
      }
   }

   /* Now, take a reference and add it to the validation list. */
   iris_bo_reference(bo);

   if (batch->exec_count == batch->exec_array_size) {
      batch->exec_array_size *= 2;
      batch->exec_bos =
         realloc(batch->exec_bos,
                 batch->exec_array_size * sizeof(batch->exec_bos[0]));
      batch->validation_list =
         realloc(batch->validation_list,
                 batch->exec_array_size * sizeof(batch->validation_list[0]));
   }

   batch->validation_list[batch->exec_count] =
      (struct drm_i915_gem_exec_object2) {
         .handle = bo->gem_handle,
         .offset = bo->gtt_offset,
         .flags = bo->kflags | (writable ? EXEC_OBJECT_WRITE : 0),
      };

   bo->index = batch->exec_count;
   batch->exec_bos[batch->exec_count] = bo;
   batch->aperture_space += bo->size;

   batch->exec_count++;
}
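/* A minimal usage sketch (hypothetical caller, not part of this file):
 * any state emission that makes the GPU dereference a BO pins it first,
 * then points the hardware packet at its pinned address.
 *
 *    iris_use_pinned_bo(batch, res->bo, false);         // read-only use
 *    iris_use_pinned_bo(batch, dst_res->bo, true);      // render target
 *    uint64_t addr = dst_res->bo->gtt_offset + offset;  // safe: BO is pinned
 *
 * Here `res`, `dst_res`, and `offset` are illustrative names only.
 */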
static void
create_batch(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   batch->bo = iris_bo_alloc(bufmgr, "command buffer",
                             BATCH_SZ + BATCH_RESERVED, IRIS_MEMZONE_OTHER);
   batch->bo->kflags |= EXEC_OBJECT_CAPTURE;
   batch->map = iris_bo_map(NULL, batch->bo, MAP_READ | MAP_WRITE);
   batch->map_next = batch->map;

   iris_use_pinned_bo(batch, batch->bo, false);
}
static void
iris_batch_reset(struct iris_batch *batch)
{
   if (batch->last_bo != NULL) {
      iris_bo_unreference(batch->last_bo);
      batch->last_bo = NULL;
   }
   batch->last_bo = batch->bo;
   batch->primary_batch_size = 0;
   batch->contains_draw = false;

   create_batch(batch);
   assert(batch->bo->index == 0);

   if (batch->state_sizes)
      _mesa_hash_table_clear(batch->state_sizes, NULL);

   iris_cache_sets_clear(batch);
}
void
iris_batch_free(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   for (int i = 0; i < batch->exec_count; i++) {
      iris_bo_unreference(batch->exec_bos[i]);
   }
   free(batch->exec_bos);
   free(batch->validation_list);

   iris_bo_unreference(batch->bo);
   batch->bo = NULL;
   batch->map = NULL;
   batch->map_next = NULL;

   iris_bo_unreference(batch->last_bo);

   iris_destroy_hw_context(bufmgr, batch->hw_ctx_id);

   _mesa_hash_table_destroy(batch->cache.render, NULL);
   _mesa_set_destroy(batch->cache.depth, NULL);

   if (batch->state_sizes) {
      _mesa_hash_table_destroy(batch->state_sizes, NULL);
      gen_batch_decode_ctx_finish(&batch->decoder);
   }
}
/**
 * If we've chained to a secondary batch, or are getting near to the end,
 * then flush.  This should only be called between draws.
 */
void
iris_batch_maybe_flush(struct iris_batch *batch, unsigned estimate)
{
   if (batch->bo != batch->exec_bos[0] ||
       iris_batch_bytes_used(batch) + estimate >= BATCH_SZ) {
      iris_batch_flush(batch);
   }
}
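/* Usage sketch (the exact byte estimates live in the callers, e.g. the
 * draw path): before emitting a draw's worth of commands, reserve a
 * worst-case estimate so the draw never straddles the end of the batch.
 *
 *    iris_batch_maybe_flush(batch, 1500);  // 1500 is an illustrative estimate
 *    ...emit state and 3DPRIMITIVE...
 */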
void
iris_chain_to_new_batch(struct iris_batch *batch)
{
   /* We only support chaining a single time. */
   assert(batch->bo == batch->exec_bos[0]);

   /* MI_BATCH_BUFFER_START is 3 DWords: a header DWord plus a QWord
    * address, so reserve 12 bytes at the end of the current batch.
    */
   uint32_t *cmd = batch->map_next;
   uint64_t *addr = batch->map_next + 4;
   batch->map_next += 12;

   /* No longer held by batch->bo, still held by validation list */
   iris_bo_unreference(batch->bo);
   batch->primary_batch_size = iris_batch_bytes_used(batch);
   create_batch(batch);

   /* Emit MI_BATCH_BUFFER_START to chain to another batch.
    * (0x31 << 23) is the MI_BATCH_BUFFER_START opcode, (1 << 8) selects
    * the PPGTT address space, and (3 - 2) is the DWord length field
    * (total DWords minus two).
    */
   *cmd = (0x31 << 23) | (1 << 8) | (3 - 2);
   *addr = batch->bo->gtt_offset;
}
/**
 * Terminate a batch with MI_BATCH_BUFFER_END.
 */
static void
iris_finish_batch(struct iris_batch *batch)
{
   /* Emit MI_BATCH_BUFFER_END to finish our batch. */
   uint32_t *map = batch->map_next;

   map[0] = (0xA << 23);

   batch->map_next += 4;

   if (batch->bo == batch->exec_bos[0])
      batch->primary_batch_size = iris_batch_bytes_used(batch);
}
/**
 * Submit the batch to the GPU via execbuffer2.
 */
static int
submit_batch(struct iris_batch *batch, int in_fence_fd, int *out_fence_fd)
{
   iris_bo_unmap(batch->bo);

   /* The requirements for using I915_EXEC_NO_RELOC are:
    *
    *   The addresses written in the objects must match the corresponding
    *   reloc.gtt_offset which in turn must match the corresponding
    *   execobject.offset.
    *
    *   Any render targets written to in the batch must be flagged with
    *   EXEC_OBJECT_WRITE.
    *
    *   To avoid stalling, execobject.offset should match the current
    *   address of that object within the active context.
    */
   struct drm_i915_gem_execbuffer2 execbuf = {
      .buffers_ptr = (uintptr_t) batch->validation_list,
      .buffer_count = batch->exec_count,
      .batch_start_offset = 0,
      /* This must be QWord aligned. */
      .batch_len = ALIGN(batch->primary_batch_size, 8),
      .flags = batch->engine |
               I915_EXEC_NO_RELOC |
               I915_EXEC_BATCH_FIRST |
               I915_EXEC_HANDLE_LUT,
      .rsvd1 = batch->hw_ctx_id, /* rsvd1 is actually the context ID */
   };

   unsigned long cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2;

   if (in_fence_fd != -1) {
      execbuf.rsvd2 = in_fence_fd;
      execbuf.flags |= I915_EXEC_FENCE_IN;
   }

   if (out_fence_fd != NULL) {
      cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2_WR;
      execbuf.flags |= I915_EXEC_FENCE_OUT;
   }

   int ret = drm_ioctl(batch->screen->fd, cmd, &execbuf);
   if (ret != 0) {
      ret = -errno;
      DBG("execbuf FAILED: errno = %d\n", -ret);
      fprintf(stderr, "execbuf FAILED: errno = %d\n", -ret);
   } else {
      DBG("execbuf succeeded\n");
   }

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];

      bo->idle = false;
      bo->index = -1;
   }

   if (ret == 0 && out_fence_fd != NULL)
      *out_fence_fd = execbuf.rsvd2 >> 32;

   return ret;
}
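/* Note on the fence plumbing above: with I915_EXEC_FENCE_IN the kernel
 * reads the input sync_file fd from the low 32 bits of execbuf.rsvd2, and
 * with I915_EXEC_FENCE_OUT it returns the new fence fd in the high 32
 * bits, which is why submit_batch() shifts rsvd2 down by 32 on success.
 */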
/**
 * Flush the batch buffer, submitting it to the GPU and resetting it so
 * we're ready to emit the next batch.
 *
 * \param in_fence_fd is ignored if -1.  Otherwise, this function takes
 * ownership of the fd.
 *
 * \param out_fence_fd is ignored if NULL.  Otherwise, the caller must
 * take ownership of the returned fd.
 */
int
_iris_batch_flush_fence(struct iris_batch *batch,
                        int in_fence_fd, int *out_fence_fd,
                        const char *file, int line)
{
   if (iris_batch_bytes_used(batch) == 0)
      return 0;

   iris_finish_batch(batch);

   if (unlikely(INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT))) {
      int bytes_for_commands = iris_batch_bytes_used(batch);
      int second_bytes = 0;
      if (batch->bo != batch->exec_bos[0]) {
         second_bytes = bytes_for_commands;
         bytes_for_commands += batch->primary_batch_size;
      }
      fprintf(stderr, "%19s:%-3d: %s batch [%u] flush with %5d+%5db (%0.1f%%) "
              "(cmds), %4d BOs (%0.1fMb aperture)\n",
              file, line, batch->name, batch->hw_ctx_id,
              batch->primary_batch_size, second_bytes,
              100.0f * bytes_for_commands / BATCH_SZ,
              batch->exec_count,
              (float) batch->aperture_space / (1024 * 1024));
      dump_validation_list(batch);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
      decode_batch(batch);
   }

   int ret = submit_batch(batch, in_fence_fd, out_fence_fd);

   if (ret >= 0) {
      //if (iris->ctx.Const.ResetStrategy == GL_LOSE_CONTEXT_ON_RESET_ARB)
      //   iris_check_for_reset(ice);

      if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
         dbg_printf("waiting for idle\n");
         iris_bo_wait_rendering(batch->bo);
      }
   } else {
      const bool color = INTEL_DEBUG & DEBUG_COLOR;
      fprintf(stderr, "%siris: Failed to submit batchbuffer: %-80s%s\n",
              color ? "\e[1;41m" : "", strerror(-ret), color ? "\e[0m" : "");
   }

   /* Clean up after the batch we submitted and prepare for a new one. */
   for (int i = 0; i < batch->exec_count; i++) {
      iris_bo_unreference(batch->exec_bos[i]);
      batch->exec_bos[i] = NULL;
   }
   batch->exec_count = 0;
   batch->aperture_space = 0;

   /* Start a new batch buffer. */
   iris_batch_reset(batch);

   return 0;
}
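/* Callers normally reach _iris_batch_flush_fence() through iris_batch_flush()
 * (used above), which presumably wraps it in iris_batch.h, supplying
 * __FILE__/__LINE__ for the debug output and -1/NULL for the fence
 * arguments when no fencing is needed.
 */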
/**
 * Does the current batch refer to the given BO?
 *
 * (In other words, is the BO in the current batch's validation list?)
 */
bool
iris_batch_references(struct iris_batch *batch, struct iris_bo *bo)
{
   return find_validation_entry(batch, bo) != NULL;
}