/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"

#include "drm-uapi/i915_drm.h"

#include "util/hash_table.h"
#include "main/macros.h"

#include <errno.h>

#define FILE_DEBUG_FLAG DEBUG_BUFMGR

/**
 * Target sizes of the batch and state buffers.  We create the initial
 * buffers at these sizes, and flush when they're nearly full.  If we
 * underestimate how close we are to the end, and suddenly need more space
 * in the middle of a draw, we can grow the buffers, and finish the draw.
 * At that point, we'll be over our target size, so the next operation
 * should flush.  Each time we flush the batch, we recreate both buffers
 * at the original target size, so it doesn't grow without bound.
 */
#define BATCH_SZ (20 * 1024)
#define STATE_SZ (18 * 1024)

static void decode_batch(struct iris_batch *batch);

static void
iris_batch_reset(struct iris_batch *batch);

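/**
 * Debug helper: print the contents of the execbuf validation list.
 */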
static void
dump_validation_list(struct iris_batch *batch)
{
   fprintf(stderr, "Validation list (length %d):\n", batch->exec_count);

   for (int i = 0; i < batch->exec_count; i++) {
      uint64_t flags = batch->validation_list[i].flags;
      assert(batch->validation_list[i].handle ==
             batch->exec_bos[i]->gem_handle);
      fprintf(stderr, "[%2d]: %2d %-14s %p %s%-7s @ 0x%016llx (%"PRIu64"B)\n",
              i,
              batch->validation_list[i].handle,
              batch->exec_bos[i]->name,
              batch->exec_bos[i],
              (flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) ? "(48b" : "(32b",
              (flags & EXEC_OBJECT_WRITE) ? " write)" : ")",
              batch->validation_list[i].offset,
              batch->exec_bos[i]->size);
   }
}

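/**
 * Batch decoder callback: look up the BO containing the given GTT address
 * and return a CPU mapping of it.
 */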
static struct gen_batch_decode_bo
decode_get_bo(void *v_batch, uint64_t address)
{
   struct iris_batch *batch = v_batch;

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];
      if (address >= bo->gtt_offset &&
          address < bo->gtt_offset + bo->size) {
         return (struct gen_batch_decode_bo) {
            .addr = address,
            .size = bo->size,
            .map = iris_bo_map(batch->dbg, bo, MAP_READ) +
                   (address - bo->gtt_offset),
         };
      }
   }

   return (struct gen_batch_decode_bo) { };
}

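/* Hash table callbacks which treat the pointer value itself as the key. */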
static bool
uint_key_compare(const void *a, const void *b)
{
   return a == b;
}

static uint32_t
uint_key_hash(const void *key)
{
   return (uintptr_t) key;
}

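/**
 * Allocate a fresh command buffer BO, flag it for GPU error-state capture,
 * and map it for writing.
 */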
static void
create_batch_buffer(struct iris_bufmgr *bufmgr,
                    struct iris_batch_buffer *buf,
                    const char *name, unsigned size)
{
   buf->bo = iris_bo_alloc(bufmgr, name, size, IRIS_MEMZONE_OTHER);
   buf->bo->kflags |= EXEC_OBJECT_CAPTURE;
   buf->map = iris_bo_map(NULL, buf->bo, MAP_READ | MAP_WRITE);
   buf->map_next = buf->map;
}

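/**
 * One-time initialization of a batch: record the target ring, allocate the
 * exec_bos/validation lists, and set up the decoder when INTEL_DEBUG is set.
 */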
void
iris_init_batch(struct iris_batch *batch,
                struct iris_screen *screen,
                struct pipe_debug_callback *dbg,
                uint8_t ring)
{
   batch->screen = screen;
   batch->dbg = dbg;

   /* ring should be one of I915_EXEC_RENDER, I915_EXEC_BLT, etc. */
   assert((ring & ~I915_EXEC_RING_MASK) == 0);
   assert(util_bitcount(ring) == 1);
   batch->ring = ring;

   batch->exec_count = 0;
   batch->exec_array_size = 100;
   batch->exec_bos =
      malloc(batch->exec_array_size * sizeof(batch->exec_bos[0]));
   batch->validation_list =
      malloc(batch->exec_array_size * sizeof(batch->validation_list[0]));

   if (unlikely(INTEL_DEBUG)) {
      batch->state_sizes =
         _mesa_hash_table_create(NULL, uint_key_hash, uint_key_compare);

      const unsigned decode_flags =
         GEN_BATCH_DECODE_FULL |
         ((INTEL_DEBUG & DEBUG_COLOR) ? GEN_BATCH_DECODE_IN_COLOR : 0) |
         GEN_BATCH_DECODE_OFFSETS |
         GEN_BATCH_DECODE_FLOATS;

      gen_batch_decode_ctx_init(&batch->decoder, &screen->devinfo,
                                stderr, decode_flags, NULL,
                                decode_get_bo, NULL, batch);
   }

   iris_batch_reset(batch);
}

#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

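/**
 * Add a buffer to the batch's validation list, taking a reference to it.
 * Returns the buffer's index within the exec_bos/validation lists.
 */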
static unsigned
add_exec_bo(struct iris_batch *batch, struct iris_bo *bo)
{
   unsigned index = READ_ONCE(bo->index);

   if (index < batch->exec_count && batch->exec_bos[index] == bo)
      return index;

   /* May have been shared between multiple active batches */
   for (index = 0; index < batch->exec_count; index++) {
      if (batch->exec_bos[index] == bo)
         return index;
   }

   iris_bo_reference(bo);

   if (batch->exec_count == batch->exec_array_size) {
      batch->exec_array_size *= 2;
      batch->exec_bos =
         realloc(batch->exec_bos,
                 batch->exec_array_size * sizeof(batch->exec_bos[0]));
      batch->validation_list =
         realloc(batch->validation_list,
                 batch->exec_array_size * sizeof(batch->validation_list[0]));
   }

   batch->validation_list[batch->exec_count] =
      (struct drm_i915_gem_exec_object2) {
         .handle = bo->gem_handle,
         .offset = bo->gtt_offset,
         .flags = bo->kflags,
      };

   bo->index = batch->exec_count;
   batch->exec_bos[batch->exec_count] = bo;
   batch->aperture_space += bo->size;

   return batch->exec_count++;
}

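/**
 * Start a fresh command buffer: allocate a new BO, add it to the validation
 * list, and, on the render ring, re-emit the state base addresses.
 */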
static void
iris_batch_reset(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   if (batch->last_cmd_bo != NULL) {
      iris_bo_unreference(batch->last_cmd_bo);
      batch->last_cmd_bo = NULL;
   }
   batch->last_cmd_bo = batch->cmdbuf.bo;

   create_batch_buffer(bufmgr, &batch->cmdbuf, "command buffer", BATCH_SZ);

   add_exec_bo(batch, batch->cmdbuf.bo);
   assert(batch->cmdbuf.bo->index == 0);

   if (batch->state_sizes)
      _mesa_hash_table_clear(batch->state_sizes, NULL);

   if (batch->ring == I915_EXEC_RENDER)
      batch->emit_state_base_address(batch);
}

static void
iris_batch_reset_and_clear_render_cache(struct iris_batch *batch)
{
   iris_batch_reset(batch);
   // XXX: iris_render_cache_set_clear(batch);
}

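/** Release a batch buffer's BO and clear its map pointers. */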
static void
free_batch_buffer(struct iris_batch_buffer *buf)
{
   iris_bo_unreference(buf->bo);
   buf->bo = NULL;
   buf->map = NULL;
   buf->map_next = NULL;
}

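/**
 * Tear down a batch: drop references to all BOs in the validation list,
 * free the lists and command buffer, and clean up any decoder state.
 */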
void
iris_batch_free(struct iris_batch *batch)
{
   for (int i = 0; i < batch->exec_count; i++) {
      iris_bo_unreference(batch->exec_bos[i]);
   }
   free(batch->exec_bos);
   free(batch->validation_list);
   free_batch_buffer(&batch->cmdbuf);

   iris_bo_unreference(batch->last_cmd_bo);

   if (batch->state_sizes) {
      _mesa_hash_table_destroy(batch->state_sizes, NULL);
      gen_batch_decode_ctx_finish(&batch->decoder);
   }
}

/**
 * Finish copying the old batch/state buffer's contents to the new one
 * after we tried to "grow" the buffer in an earlier operation.
 */
static void
finish_growing_bos(struct iris_batch_buffer *buf)
{
   struct iris_bo *old_bo = buf->partial_bo;
   if (!old_bo)
      return;

   void *old_map = old_bo->map_cpu ? old_bo->map_cpu : old_bo->map_wc;
   memcpy(buf->map, old_map, buf->partial_bytes);

   buf->partial_bo = NULL;
   buf->partial_bytes = 0;

   iris_bo_unreference(old_bo);
}

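/** Number of bytes written into the buffer so far. */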
static unsigned
buffer_bytes_used(struct iris_batch_buffer *buf)
{
   return buf->map_next - buf->map;
}

/**
 * Grow either the batch or state buffer to a new larger size.
 *
 * We can't actually grow buffers, so we allocate a new one, copy over
 * the existing contents, and update our lists to refer to the new one.
 *
 * Note that this is only temporary - each new batch recreates the buffers
 * at their original target size (BATCH_SZ or STATE_SZ).
 */
static void
grow_buffer(struct iris_batch *batch,
            struct iris_batch_buffer *buf,
            unsigned new_size)
{
   struct iris_bufmgr *bufmgr = batch->screen->bufmgr;
   struct iris_bo *bo = buf->bo;

   perf_debug(batch->dbg, "Growing %s - ran out of space\n", bo->name);

   if (buf->partial_bo) {
      /* We've already grown once, and now we need to do it again.
       * Finish our last grow operation so we can start a new one.
       * This should basically never happen.
       */
      perf_debug(batch->dbg, "Had to grow multiple times");
      finish_growing_bos(buf);
   }

   const unsigned existing_bytes = buffer_bytes_used(buf);

   struct iris_bo *new_bo =
      iris_bo_alloc(bufmgr, bo->name, new_size, IRIS_MEMZONE_OTHER);

   buf->map = iris_bo_map(NULL, new_bo, MAP_READ | MAP_WRITE);
   buf->map_next = buf->map + existing_bytes;

   /* Try to put the new BO at the same GTT offset as the old BO (which
    * we're throwing away, so it doesn't need to be there).
    *
    * This guarantees that our relocations continue to work: values we've
    * already written into the buffer, values we're going to write into the
    * buffer, and the validation/relocation lists all will match.
    *
    * Also preserve kflags for EXEC_OBJECT_CAPTURE.
    */
   new_bo->gtt_offset = bo->gtt_offset;
   new_bo->index = bo->index;
   new_bo->kflags = bo->kflags;

   /* Batch/state buffers are per-context, and if we've run out of space,
    * we must have actually used them before, so...they will be in the list.
    */
   assert(bo->index < batch->exec_count);
   assert(batch->exec_bos[bo->index] == bo);

   /* Update the validation list to use the new BO. */
   batch->exec_bos[bo->index] = new_bo;
   batch->validation_list[bo->index].handle = new_bo->gem_handle;

   /* Exchange the two BOs...without breaking pointers to the old BO.
    *
    * Consider this scenario:
    *
    * 1. Somebody calls iris_state_batch() to get a region of memory, and
    *    then creates an iris_address pointing to iris->batch.state.bo.
    * 2. They then call iris_state_batch() a second time, which happens to
    *    grow and replace the state buffer.  They then try to emit a
    *    relocation to their first section of memory.
    *
    * If we replace the iris->batch.state.bo pointer at step 2, we would
    * break the address created in step 1.  They'd have a pointer to the
    * old destroyed BO.  Emitting a relocation would add this dead BO to
    * the validation list...causing /both/ statebuffers to be in the list,
    * and all kinds of disasters.
    *
    * This is not a contrived case - BLORP vertex data upload hits this.
    *
    * There are worse scenarios too.  Fences for GL sync objects reference
    * iris->batch.batch.bo.  If we replaced the batch pointer when growing,
    * we'd need to chase down every fence and update it to point to the
    * new BO.  Otherwise, it would refer to a "batch" that never actually
    * gets submitted, and would fail to trigger.
    *
    * To work around both of these issues, we transmutate the buffers in
    * place, making the existing struct iris_bo represent the new buffer,
    * and "new_bo" represent the old BO.  This is highly unusual, but it
    * seems like a necessary evil.
    *
    * We also defer the memcpy of the existing batch's contents.  Callers
    * may make multiple iris_state_batch calls, and retain pointers to the
    * old BO's map.  We'll perform the memcpy in finish_growing_bos() when
    * we finally submit the batch, at which point we've finished uploading
    * state, and nobody should have any old references anymore.
    *
    * To do that, we keep a reference to the old BO in grow->partial_bo,
    * and store the number of bytes to copy in grow->partial_bytes.  We
    * can monkey with the refcounts directly without atomics because these
    * are per-context BOs and they can only be touched by this thread.
    */
   assert(new_bo->refcount == 1);
   new_bo->refcount = bo->refcount;
   bo->refcount = 1;

   struct iris_bo tmp;
   memcpy(&tmp, bo, sizeof(struct iris_bo));
   memcpy(bo, new_bo, sizeof(struct iris_bo));
   memcpy(new_bo, &tmp, sizeof(struct iris_bo));

   buf->partial_bo = new_bo; /* the one reference of the OLD bo */
   buf->partial_bytes = existing_bytes;
}

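/**
 * Make sure the buffer can hold another size bytes: flush the batch if we're
 * past the flush threshold (and allowed to wrap), or otherwise grow the
 * buffer, up to max_buffer_size.
 */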
static void
require_buffer_space(struct iris_batch *batch,
                     struct iris_batch_buffer *buf,
                     unsigned size,
                     unsigned flush_threshold,
                     unsigned max_buffer_size)
{
   const unsigned required_bytes = buffer_bytes_used(buf) + size;

   if (!batch->no_wrap && required_bytes >= flush_threshold) {
      iris_batch_flush(batch);
   } else if (required_bytes >= buf->bo->size) {
      grow_buffer(batch, buf,
                  MIN2(buf->bo->size + buf->bo->size / 2, max_buffer_size));
      assert(required_bytes < buf->bo->size);
   }
}

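/** Reserve space in the command buffer, flushing or growing it as needed. */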
void
iris_require_command_space(struct iris_batch *batch, unsigned size)
{
   require_buffer_space(batch, &batch->cmdbuf, size, BATCH_SZ, MAX_BATCH_SIZE);
}

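/** Copy 'size' bytes of command data into the command buffer. */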
void
iris_batch_emit(struct iris_batch *batch, const void *data, unsigned size)
{
   iris_require_command_space(batch, size);
   memcpy(batch->cmdbuf.map_next, data, size);
   batch->cmdbuf.map_next += size;
}

/**
 * Called from iris_batch_flush before emitting MI_BATCH_BUFFER_END and
 * sending it off.
 *
 * This function can emit state (say, to preserve registers that aren't saved
 * between batches).
 */
static void
iris_finish_batch(struct iris_batch *batch)
{
   batch->no_wrap = true;

   /* Emit MI_BATCH_BUFFER_END to finish our batch.  Note that execbuf2
    * requires our batch size to be QWord aligned, so we pad it out if
    * necessary by emitting an extra MI_NOOP after the end.
    */
   const uint32_t MI_BATCH_BUFFER_END_AND_NOOP[2] = { (0xA << 23), 0 };
   const bool qword_aligned = (buffer_bytes_used(&batch->cmdbuf) % 8) == 0;
   iris_batch_emit(batch, MI_BATCH_BUFFER_END_AND_NOOP, qword_aligned ? 8 : 4);

   batch->no_wrap = false;
}

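/**
 * Submit the batch to the kernel via DRM_IOCTL_I915_GEM_EXECBUFFER2, then
 * record any placement changes the kernel reports in iris_bo::gtt_offset.
 */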
static int
submit_batch(struct iris_batch *batch, int in_fence_fd, int *out_fence_fd)
{
   iris_bo_unmap(batch->cmdbuf.bo);

   /* The requirements for using I915_EXEC_NO_RELOC are:
    *
    *   The addresses written in the objects must match the corresponding
    *   reloc.gtt_offset which in turn must match the corresponding
    *   execobject.offset.
    *
    *   Any render targets written to in the batch must be flagged with
    *   EXEC_OBJECT_WRITE.
    *
    *   To avoid stalling, execobject.offset should match the current
    *   address of that object within the active context.
    */
   struct drm_i915_gem_execbuffer2 execbuf = {
      .buffers_ptr = (uintptr_t) batch->validation_list,
      .buffer_count = batch->exec_count,
      .batch_start_offset = 0,
      .batch_len = buffer_bytes_used(&batch->cmdbuf),
      .flags = batch->ring |
               I915_EXEC_NO_RELOC |
               I915_EXEC_BATCH_FIRST |
               I915_EXEC_HANDLE_LUT,
      .rsvd1 = batch->hw_ctx_id, /* rsvd1 is actually the context ID */
   };

   unsigned long cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2;

   if (in_fence_fd != -1) {
      execbuf.rsvd2 = in_fence_fd;
      execbuf.flags |= I915_EXEC_FENCE_IN;
   }

   if (out_fence_fd != NULL) {
      cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2_WR;
      execbuf.flags |= I915_EXEC_FENCE_OUT;
   }

#if 0
   int ret = drm_ioctl(batch->screen->fd, cmd, &execbuf);
   if (ret != 0) {
      ret = -errno;
      DBG("execbuf FAILED: errno = %d\n", -ret);
   } else {
      DBG("execbuf succeeded\n");
   }
#else
   int ret = 0;
   fprintf(stderr, "execbuf disabled for now\n");
#endif

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];

      /* Update iris_bo::gtt_offset */
      if (batch->validation_list[i].offset != bo->gtt_offset) {
         DBG("BO %d migrated: 0x%" PRIx64 " -> 0x%llx\n",
             bo->gem_handle, bo->gtt_offset,
             batch->validation_list[i].offset);
         bo->gtt_offset = batch->validation_list[i].offset;
      }
   }

   if (ret == 0 && out_fence_fd != NULL)
      *out_fence_fd = execbuf.rsvd2 >> 32;

   return ret;
}

/**
 * The in_fence_fd is ignored if -1.  Otherwise this function takes ownership
 * of the fd.
 *
 * The out_fence_fd is ignored if NULL.  Otherwise, the caller takes ownership
 * of the returned fd.
 */
int
_iris_batch_flush_fence(struct iris_batch *batch,
                        int in_fence_fd, int *out_fence_fd,
                        const char *file, int line)
{
   if (buffer_bytes_used(&batch->cmdbuf) == 0)
      return 0;

   /* Check that we didn't just wrap our batchbuffer at a bad time. */
   assert(!batch->no_wrap);

   iris_finish_batch(batch);

   if (unlikely(INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT))) {
      int bytes_for_commands = buffer_bytes_used(&batch->cmdbuf);
      fprintf(stderr, "%19s:%-3d: Batchbuffer flush with %5db (%0.1f%%), "
              "%4d BOs (%0.1fMb aperture)\n",
              file, line,
              bytes_for_commands, 100.0f * bytes_for_commands / BATCH_SZ,
              batch->exec_count,
              (float) batch->aperture_space / (1024 * 1024));
      dump_validation_list(batch);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
      decode_batch(batch);

   int ret = submit_batch(batch, in_fence_fd, out_fence_fd);

   //if (iris->ctx.Const.ResetStrategy == GL_LOSE_CONTEXT_ON_RESET_ARB)
   //iris_check_for_reset(ice);

   if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
      dbg_printf("waiting for idle\n");
      iris_bo_wait_rendering(batch->cmdbuf.bo);
   }

   /* Clean up after the batch we submitted and prepare for a new one. */
   for (int i = 0; i < batch->exec_count; i++) {
      iris_bo_unreference(batch->exec_bos[i]);
      batch->exec_bos[i] = NULL;
   }
   batch->exec_count = 0;
   batch->aperture_space = 0;

   /* Start a new batch buffer. */
   iris_batch_reset_and_clear_render_cache(batch);

   return ret;
}

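/** Return true if the given BO is referenced by the current batch. */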
bool
iris_batch_references(struct iris_batch *batch, struct iris_bo *bo)
{
   unsigned index = READ_ONCE(bo->index);
   if (index < batch->exec_count && batch->exec_bos[index] == bo)
      return true;

   for (int i = 0; i < batch->exec_count; i++) {
      if (batch->exec_bos[i] == bo)
         return true;
   }
   return false;
}

/* This is the only way buffers get added to the validate list.
 */
void
iris_use_pinned_bo(struct iris_batch *batch,
                   struct iris_bo *bo,
                   bool writable)
{
   assert(bo->kflags & EXEC_OBJECT_PINNED);
   unsigned index = add_exec_bo(batch, bo);
   if (writable)
      batch->validation_list[index].flags |= EXEC_OBJECT_WRITE;
}

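/** Print a decoded view of the current command buffer to stderr. */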
static void
decode_batch(struct iris_batch *batch)
{
   gen_print_batch(&batch->decoder, batch->cmdbuf.map,
                   buffer_bytes_used(&batch->cmdbuf),
                   batch->cmdbuf.bo->gtt_offset);
}