2 * Copyright © 2017 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the
6 * "Software"), to deal in the Software without restriction, including
7 * without limitation the rights to use, copy, modify, merge, publish,
8 * distribute, sublicense, and/or sell copies of the Software, and to
9 * permit persons to whom the Software is furnished to do so, subject to
10 * the following conditions:
12 * The above copyright notice and this permission notice (including the
13 * next paragraph) shall be included in all copies or substantial portions
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
17 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
18 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
19 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
20 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
21 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
22 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
25 #include "iris_batch.h"
26 #include "iris_bufmgr.h"
27 #include "iris_context.h"
29 #include "drm-uapi/i915_drm.h"
31 #include "util/hash_table.h"
33 #include "main/macros.h"
38 #define FILE_DEBUG_FLAG DEBUG_BUFMGR
41 * Target sizes of the batch and state buffers. We create the initial
42 * buffers at these sizes, and flush when they're nearly full. If we
43 * underestimate how close we are to the end, and suddenly need more space
44 * in the middle of a draw, we can grow the buffers, and finish the draw.
45 * At that point, we'll be over our target size, so the next operation
46 * should flush. Each time we flush the batch, we recreate both buffers
47 * at the original target size, so it doesn't grow without bound.
49 #define BATCH_SZ (20 * 1024)
50 #define STATE_SZ (18 * 1024)
52 static void decode_batch(struct iris_batch
*batch
);
55 iris_batch_reset(struct iris_batch
*batch
);
58 dump_validation_list(struct iris_batch
*batch
)
60 fprintf(stderr
, "Validation list (length %d):\n", batch
->exec_count
);
62 for (int i
= 0; i
< batch
->exec_count
; i
++) {
63 uint64_t flags
= batch
->validation_list
[i
].flags
;
64 assert(batch
->validation_list
[i
].handle
==
65 batch
->exec_bos
[i
]->gem_handle
);
66 fprintf(stderr
, "[%2d]: %2d %-14s %p %-7s @ 0x%016llx (%"PRIu64
"B)\n",
68 batch
->validation_list
[i
].handle
,
69 batch
->exec_bos
[i
]->name
,
71 (flags
& EXEC_OBJECT_WRITE
) ? "(write)" : "",
72 batch
->validation_list
[i
].offset
,
73 batch
->exec_bos
[i
]->size
);
77 static struct gen_batch_decode_bo
78 decode_get_bo(void *v_batch
, uint64_t address
)
80 struct iris_batch
*batch
= v_batch
;
82 for (int i
= 0; i
< batch
->exec_count
; i
++) {
83 struct iris_bo
*bo
= batch
->exec_bos
[i
];
84 /* The decoder zeroes out the top 16 bits, so we need to as well */
85 uint64_t bo_address
= bo
->gtt_offset
& (~0ull >> 16);
87 if (address
>= bo_address
&& address
< bo_address
+ bo
->size
) {
88 return (struct gen_batch_decode_bo
) {
91 .map
= iris_bo_map(batch
->dbg
, bo
, MAP_READ
) +
92 (address
- bo_address
),
97 return (struct gen_batch_decode_bo
) { };
/* Key-equality callback for hash tables whose keys are small integers
 * stored directly in the pointer value (see uint_key_hash).  Two keys are
 * equal iff the pointer bits are identical.
 *
 * NOTE(review): the body of this function was lost in the source
 * mangling; pointer-identity compare is the standard pairing for
 * uint_key_hash — confirm against history.
 */
static bool
uint_key_compare(const void *a, const void *b)
{
   return a == b;
}
/* Hash callback for tables keyed by small integers smuggled through the
 * key pointer: the pointer's integer value IS the hash.
 */
static uint32_t
uint_key_hash(const void *key)
{
   return (uintptr_t) key;
}
113 create_batch_buffer(struct iris_bufmgr
*bufmgr
,
114 struct iris_batch_buffer
*buf
,
115 const char *name
, unsigned size
)
117 buf
->bo
= iris_bo_alloc(bufmgr
, name
, size
, IRIS_MEMZONE_OTHER
);
118 buf
->bo
->kflags
|= EXEC_OBJECT_CAPTURE
;
119 buf
->map
= iris_bo_map(NULL
, buf
->bo
, MAP_READ
| MAP_WRITE
);
120 buf
->map_next
= buf
->map
;
124 iris_init_batch(struct iris_batch
*batch
,
125 struct iris_screen
*screen
,
126 struct iris_vtable
*vtbl
,
127 struct pipe_debug_callback
*dbg
,
130 batch
->screen
= screen
;
134 /* ring should be one of I915_EXEC_RENDER, I915_EXEC_BLT, etc. */
135 assert((ring
& ~I915_EXEC_RING_MASK
) == 0);
136 assert(util_bitcount(ring
) == 1);
139 batch
->exec_count
= 0;
140 batch
->exec_array_size
= 100;
142 malloc(batch
->exec_array_size
* sizeof(batch
->exec_bos
[0]));
143 batch
->validation_list
=
144 malloc(batch
->exec_array_size
* sizeof(batch
->validation_list
[0]));
146 batch
->cache
.render
= _mesa_hash_table_create(NULL
, _mesa_hash_pointer
,
147 _mesa_key_pointer_equal
);
148 batch
->cache
.depth
= _mesa_set_create(NULL
, _mesa_hash_pointer
,
149 _mesa_key_pointer_equal
);
150 if (unlikely(INTEL_DEBUG
)) {
152 _mesa_hash_table_create(NULL
, uint_key_hash
, uint_key_compare
);
154 const unsigned decode_flags
=
155 GEN_BATCH_DECODE_FULL
|
156 ((INTEL_DEBUG
& DEBUG_COLOR
) ? GEN_BATCH_DECODE_IN_COLOR
: 0) |
157 GEN_BATCH_DECODE_OFFSETS
|
158 GEN_BATCH_DECODE_FLOATS
;
160 gen_batch_decode_ctx_init(&batch
->decoder
, &screen
->devinfo
,
161 stderr
, decode_flags
, NULL
,
162 decode_get_bo
, NULL
, batch
);
165 iris_batch_reset(batch
);
168 #define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
171 add_exec_bo(struct iris_batch
*batch
, struct iris_bo
*bo
)
173 unsigned index
= READ_ONCE(bo
->index
);
175 if (index
< batch
->exec_count
&& batch
->exec_bos
[index
] == bo
)
178 /* May have been shared between multiple active batches */
179 for (index
= 0; index
< batch
->exec_count
; index
++) {
180 if (batch
->exec_bos
[index
] == bo
)
184 iris_bo_reference(bo
);
186 if (batch
->exec_count
== batch
->exec_array_size
) {
187 batch
->exec_array_size
*= 2;
189 realloc(batch
->exec_bos
,
190 batch
->exec_array_size
* sizeof(batch
->exec_bos
[0]));
191 batch
->validation_list
=
192 realloc(batch
->validation_list
,
193 batch
->exec_array_size
* sizeof(batch
->validation_list
[0]));
196 batch
->validation_list
[batch
->exec_count
] =
197 (struct drm_i915_gem_exec_object2
) {
198 .handle
= bo
->gem_handle
,
199 .offset
= bo
->gtt_offset
,
203 bo
->index
= batch
->exec_count
;
204 batch
->exec_bos
[batch
->exec_count
] = bo
;
205 batch
->aperture_space
+= bo
->size
;
207 return batch
->exec_count
++;
211 iris_batch_reset(struct iris_batch
*batch
)
213 struct iris_screen
*screen
= batch
->screen
;
214 struct iris_bufmgr
*bufmgr
= screen
->bufmgr
;
216 if (batch
->last_cmd_bo
!= NULL
) {
217 iris_bo_unreference(batch
->last_cmd_bo
);
218 batch
->last_cmd_bo
= NULL
;
220 batch
->last_cmd_bo
= batch
->cmdbuf
.bo
;
222 create_batch_buffer(bufmgr
, &batch
->cmdbuf
, "command buffer", BATCH_SZ
);
224 add_exec_bo(batch
, batch
->cmdbuf
.bo
);
225 assert(batch
->cmdbuf
.bo
->index
== 0);
227 if (batch
->state_sizes
)
228 _mesa_hash_table_clear(batch
->state_sizes
, NULL
);
230 iris_cache_sets_clear(batch
);
234 free_batch_buffer(struct iris_batch_buffer
*buf
)
236 iris_bo_unreference(buf
->bo
);
239 buf
->map_next
= NULL
;
243 iris_batch_free(struct iris_batch
*batch
)
245 for (int i
= 0; i
< batch
->exec_count
; i
++) {
246 iris_bo_unreference(batch
->exec_bos
[i
]);
248 free(batch
->exec_bos
);
249 free(batch
->validation_list
);
250 free_batch_buffer(&batch
->cmdbuf
);
252 iris_bo_unreference(batch
->last_cmd_bo
);
254 _mesa_hash_table_destroy(batch
->cache
.render
, NULL
);
255 _mesa_set_destroy(batch
->cache
.depth
, NULL
);
257 if (batch
->state_sizes
) {
258 _mesa_hash_table_destroy(batch
->state_sizes
, NULL
);
259 gen_batch_decode_ctx_finish(&batch
->decoder
);
264 buffer_bytes_used(struct iris_batch_buffer
*buf
)
266 return buf
->map_next
- buf
->map
;
270 require_buffer_space(struct iris_batch
*batch
,
271 struct iris_batch_buffer
*buf
,
273 unsigned flush_threshold
,
274 unsigned max_buffer_size
)
276 const unsigned required_bytes
= buffer_bytes_used(buf
) + size
;
278 if (!batch
->no_wrap
&& required_bytes
>= flush_threshold
) {
279 iris_batch_flush(batch
);
280 } else if (required_bytes
>= buf
->bo
->size
) {
281 assert(!"Can't grow");
287 iris_require_command_space(struct iris_batch
*batch
, unsigned size
)
289 require_buffer_space(batch
, &batch
->cmdbuf
, size
, BATCH_SZ
, MAX_BATCH_SIZE
);
293 iris_get_command_space(struct iris_batch
*batch
, unsigned bytes
)
295 iris_require_command_space(batch
, bytes
);
296 void *map
= batch
->cmdbuf
.map_next
;
297 batch
->cmdbuf
.map_next
+= bytes
;
/* Copy `size` bytes of pre-packed commands into the batch. */
void
iris_batch_emit(struct iris_batch *batch, const void *data, unsigned size)
{
   void *map = iris_get_command_space(batch, size);
   memcpy(map, data, size);
}
309 * Called from iris_batch_flush before emitting MI_BATCHBUFFER_END and
312 * This function can emit state (say, to preserve registers that aren't saved
316 iris_finish_batch(struct iris_batch
*batch
)
318 batch
->no_wrap
= true;
322 /* Emit MI_BATCH_BUFFER_END to finish our batch. Note that execbuf2
323 * requires our batch size to be QWord aligned, so we pad it out if
324 * necessary by emitting an extra MI_NOOP after the end.
326 const uint32_t MI_BATCH_BUFFER_END_AND_NOOP
[2] = { (0xA << 23), 0 };
327 const bool qword_aligned
= (buffer_bytes_used(&batch
->cmdbuf
) % 8) == 0;
328 iris_batch_emit(batch
, MI_BATCH_BUFFER_END_AND_NOOP
, qword_aligned
? 8 : 4);
330 batch
->no_wrap
= false;
334 submit_batch(struct iris_batch
*batch
, int in_fence_fd
, int *out_fence_fd
)
336 iris_bo_unmap(batch
->cmdbuf
.bo
);
338 /* The requirement for using I915_EXEC_NO_RELOC are:
340 * The addresses written in the objects must match the corresponding
341 * reloc.gtt_offset which in turn must match the corresponding
344 * Any render targets written to in the batch must be flagged with
347 * To avoid stalling, execobject.offset should match the current
348 * address of that object within the active context.
350 struct drm_i915_gem_execbuffer2 execbuf
= {
351 .buffers_ptr
= (uintptr_t) batch
->validation_list
,
352 .buffer_count
= batch
->exec_count
,
353 .batch_start_offset
= 0,
354 .batch_len
= buffer_bytes_used(&batch
->cmdbuf
),
355 .flags
= batch
->ring
|
357 I915_EXEC_BATCH_FIRST
|
358 I915_EXEC_HANDLE_LUT
,
359 .rsvd1
= batch
->hw_ctx_id
, /* rsvd1 is actually the context ID */
362 unsigned long cmd
= DRM_IOCTL_I915_GEM_EXECBUFFER2
;
364 if (in_fence_fd
!= -1) {
365 execbuf
.rsvd2
= in_fence_fd
;
366 execbuf
.flags
|= I915_EXEC_FENCE_IN
;
369 if (out_fence_fd
!= NULL
) {
370 cmd
= DRM_IOCTL_I915_GEM_EXECBUFFER2_WR
;
372 execbuf
.flags
|= I915_EXEC_FENCE_OUT
;
375 int ret
= drm_ioctl(batch
->screen
->fd
, cmd
, &execbuf
);
378 DBG("execbuf FAILED: errno = %d\n", -ret
);
380 DBG("execbuf succeeded\n");
383 for (int i
= 0; i
< batch
->exec_count
; i
++) {
384 struct iris_bo
*bo
= batch
->exec_bos
[i
];
390 if (ret
== 0 && out_fence_fd
!= NULL
)
391 *out_fence_fd
= execbuf
.rsvd2
>> 32;
397 * The in_fence_fd is ignored if -1. Otherwise this function takes ownership
400 * The out_fence_fd is ignored if NULL. Otherwise, the caller takes ownership
401 * of the returned fd.
404 _iris_batch_flush_fence(struct iris_batch
*batch
,
405 int in_fence_fd
, int *out_fence_fd
,
406 const char *file
, int line
)
408 if (buffer_bytes_used(&batch
->cmdbuf
) == 0)
411 /* Check that we didn't just wrap our batchbuffer at a bad time. */
412 assert(!batch
->no_wrap
);
414 iris_finish_batch(batch
);
416 if (unlikely(INTEL_DEBUG
& (DEBUG_BATCH
| DEBUG_SUBMIT
))) {
417 int bytes_for_commands
= buffer_bytes_used(&batch
->cmdbuf
);
418 fprintf(stderr
, "%19s:%-3d: Batchbuffer flush with %5db (%0.1f%%), "
419 "%4d BOs (%0.1fMb aperture)\n",
421 bytes_for_commands
, 100.0f
* bytes_for_commands
/ BATCH_SZ
,
423 (float) batch
->aperture_space
/ (1024 * 1024));
424 dump_validation_list(batch
);
427 if (unlikely(INTEL_DEBUG
& DEBUG_BATCH
))
430 int ret
= submit_batch(batch
, in_fence_fd
, out_fence_fd
);
437 //if (iris->ctx.Const.ResetStrategy == GL_LOSE_CONTEXT_ON_RESET_ARB)
438 //iris_check_for_reset(ice);
440 if (unlikely(INTEL_DEBUG
& DEBUG_SYNC
)) {
441 dbg_printf("waiting for idle\n");
442 iris_bo_wait_rendering(batch
->cmdbuf
.bo
);
445 /* Clean up after the batch we submitted and prepare for a new one. */
446 for (int i
= 0; i
< batch
->exec_count
; i
++) {
447 iris_bo_unreference(batch
->exec_bos
[i
]);
448 batch
->exec_bos
[i
] = NULL
;
450 batch
->exec_count
= 0;
451 batch
->aperture_space
= 0;
453 /* Start a new batch buffer. */
454 iris_batch_reset(batch
);
460 iris_batch_references(struct iris_batch
*batch
, struct iris_bo
*bo
)
462 unsigned index
= READ_ONCE(bo
->index
);
463 if (index
< batch
->exec_count
&& batch
->exec_bos
[index
] == bo
)
466 for (int i
= 0; i
< batch
->exec_count
; i
++) {
467 if (batch
->exec_bos
[i
] == bo
)
473 /* This is the only way buffers get added to the validate list.
476 iris_use_pinned_bo(struct iris_batch
*batch
,
480 assert(bo
->kflags
& EXEC_OBJECT_PINNED
);
481 unsigned index
= add_exec_bo(batch
, bo
);
483 batch
->validation_list
[index
].flags
|= EXEC_OBJECT_WRITE
;
487 decode_batch(struct iris_batch
*batch
)
489 gen_print_batch(&batch
->decoder
, batch
->cmdbuf
.map
,
490 buffer_bytes_used(&batch
->cmdbuf
),
491 batch
->cmdbuf
.bo
->gtt_offset
);