iris: Combine iris_use_pinned_bo and add_exec_bo
[mesa.git] / src/gallium/drivers/iris/iris_batch.c
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_batch.c
 *
 * Batchbuffer and command submission module.
 *
 * Every API draw call results in a number of GPU commands, which we
 * collect into a "batch buffer". Typically, many draw calls are grouped
 * into a single batch to amortize command submission overhead.
 *
 * We submit batches to the kernel using the I915_GEM_EXECBUFFER2 ioctl.
 * One critical piece of data is the "validation list", which contains a
 * list of the buffer objects (BOs) which the commands in the batch need.
 * The kernel will make sure these are resident and pinned at the correct
 * virtual memory address before executing our batch. If a BO is not in
 * the validation list, it effectively does not exist, so take care.
 */

#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"

#include "drm-uapi/i915_drm.h"

#include "util/hash_table.h"
#include "util/set.h"
#include "main/macros.h"

#include <errno.h>
#include <xf86drm.h>

#define FILE_DEBUG_FLAG DEBUG_BUFMGR

/* Terminating the batch takes either 4 bytes for MI_BATCH_BUFFER_END
 * or 12 bytes for MI_BATCH_BUFFER_START (when chaining). Plus, we may
 * need an extra 4 bytes to pad out to the nearest QWord. So reserve 16.
 */
#define BATCH_RESERVED 16

static void
iris_batch_reset(struct iris_batch *batch);

/**
 * Debugging code to dump the validation list, used by INTEL_DEBUG=submit.
 */
static void
dump_validation_list(struct iris_batch *batch)
{
   fprintf(stderr, "Validation list (length %d):\n", batch->exec_count);

   for (int i = 0; i < batch->exec_count; i++) {
      uint64_t flags = batch->validation_list[i].flags;
      assert(batch->validation_list[i].handle ==
             batch->exec_bos[i]->gem_handle);
      fprintf(stderr, "[%2d]: %2d %-14s %p %-7s @ 0x%016llx (%"PRIu64"B) - %d refs\n",
              i,
              batch->validation_list[i].handle,
              batch->exec_bos[i]->name,
              batch->exec_bos[i],
              (flags & EXEC_OBJECT_WRITE) ? "(write)" : "",
              batch->validation_list[i].offset,
              batch->exec_bos[i]->size,
              batch->exec_bos[i]->refcount);
   }
}

/**
 * Return BO information to the batch decoder (for debugging).
 */
static struct gen_batch_decode_bo
decode_get_bo(void *v_batch, uint64_t address)
{
   struct iris_batch *batch = v_batch;

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];
      /* The decoder zeroes out the top 16 bits, so we need to as well */
      uint64_t bo_address = bo->gtt_offset & (~0ull >> 16);

      if (address >= bo_address && address < bo_address + bo->size) {
         return (struct gen_batch_decode_bo) {
            .addr = address,
            .size = bo->size,
            .map = iris_bo_map(batch->dbg, bo, MAP_READ) +
                   (address - bo_address),
         };
      }
   }

   return (struct gen_batch_decode_bo) { };
}

/**
 * Decode the current batch.
 */
static void
decode_batch(struct iris_batch *batch)
{
   void *map = iris_bo_map(batch->dbg, batch->exec_bos[0], MAP_READ);
   gen_print_batch(&batch->decoder, map, batch->primary_batch_size,
                   batch->exec_bos[0]->gtt_offset);
}

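/* Hash table callbacks for tables keyed by small integers stored directly
 * in the pointer value: the key itself is the hash, and keys compare by
 * pointer equality. Used for the state_sizes table below.
 */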
static bool
uint_key_compare(const void *a, const void *b)
{
   return a == b;
}

static uint32_t
uint_key_hash(const void *key)
{
   return (uintptr_t) key;
}

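/**
 * Initialize a batch: create its hardware context, allocate the exec_bos
 * and validation_list arrays, set up the render/depth cache tracking,
 * record the other batches (so we can flush them when sharing BOs), set up
 * the batch decoder when INTEL_DEBUG is enabled, and reset to a fresh batch.
 */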
void
iris_init_batch(struct iris_batch *batch,
                struct iris_screen *screen,
                struct iris_vtable *vtbl,
                struct pipe_debug_callback *dbg,
                struct iris_batch **all_batches,
                const char *name,
                uint8_t engine)
{
   batch->screen = screen;
   batch->vtbl = vtbl;
   batch->dbg = dbg;
   batch->name = name;

   /* engine should be one of I915_EXEC_RENDER, I915_EXEC_BLT, etc. */
   assert((engine & ~I915_EXEC_RING_MASK) == 0);
   assert(util_bitcount(engine) == 1);
   batch->engine = engine;

   batch->hw_ctx_id = iris_create_hw_context(screen->bufmgr);
   assert(batch->hw_ctx_id);

   batch->exec_count = 0;
   batch->exec_array_size = 100;
   batch->exec_bos =
      malloc(batch->exec_array_size * sizeof(batch->exec_bos[0]));
   batch->validation_list =
      malloc(batch->exec_array_size * sizeof(batch->validation_list[0]));

   batch->cache.render = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                                 _mesa_key_pointer_equal);
   batch->cache.depth = _mesa_set_create(NULL, _mesa_hash_pointer,
                                         _mesa_key_pointer_equal);

   memset(batch->other_batches, 0, sizeof(batch->other_batches));

   for (int i = 0, j = 0; i < IRIS_BATCH_COUNT; i++) {
      if (all_batches[i] != batch)
         batch->other_batches[j++] = all_batches[i];
   }

   if (unlikely(INTEL_DEBUG)) {
      batch->state_sizes =
         _mesa_hash_table_create(NULL, uint_key_hash, uint_key_compare);

      const unsigned decode_flags =
         GEN_BATCH_DECODE_FULL |
         ((INTEL_DEBUG & DEBUG_COLOR) ? GEN_BATCH_DECODE_IN_COLOR : 0) |
         GEN_BATCH_DECODE_OFFSETS |
         GEN_BATCH_DECODE_FLOATS;

      gen_batch_decode_ctx_init(&batch->decoder, &screen->devinfo,
                                stderr, decode_flags, NULL,
                                decode_get_bo, NULL, batch);
      batch->decoder.max_vbo_decoded_lines = 32;
   }

   iris_batch_reset(batch);
}

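/* Read a field exactly once; the volatile cast keeps the compiler from
 * reusing a stale value (bo->index can be rewritten by other batches that
 * share the BO).
 */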
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

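/**
 * Find a BO's entry in the batch's validation list, or NULL if absent.
 *
 * bo->index is a hint from the last time the BO was added to a batch; if
 * it's stale (the BO may be in several batches' lists), fall back to a
 * linear search.
 */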
static struct drm_i915_gem_exec_object2 *
find_validation_entry(struct iris_batch *batch, struct iris_bo *bo)
{
   unsigned index = READ_ONCE(bo->index);

   if (index < batch->exec_count && batch->exec_bos[index] == bo)
      return &batch->validation_list[index];

   /* May have been shared between multiple active batches */
   for (index = 0; index < batch->exec_count; index++) {
      if (batch->exec_bos[index] == bo)
         return &batch->validation_list[index];
   }

   return NULL;
}

/**
 * Add a buffer to the current batch's validation list.
 *
 * You must call this on any BO you wish to use in this batch, to ensure
 * that it's resident when the GPU commands execute.
 */
void
iris_use_pinned_bo(struct iris_batch *batch,
                   struct iris_bo *bo,
                   bool writable)
{
   assert(bo->kflags & EXEC_OBJECT_PINNED);

   struct drm_i915_gem_exec_object2 *existing_entry =
      find_validation_entry(batch, bo);

   if (existing_entry) {
      /* The BO is already in the validation list; mark it writable */
      if (writable)
         existing_entry->flags |= EXEC_OBJECT_WRITE;

      return;
   }

   /* This is the first time our batch has seen this BO. Before we use it,
    * we need to see if other batches reference it - if so, we should flush
    * those first.
    */
   for (int b = 0; b < ARRAY_SIZE(batch->other_batches); b++) {
      // XXX: this is bad, we use the same state / instruction buffers for
      // both batches, and if both of them are reading some dynamic state,
      // we flush all the time. check for writes vs. reads?
      if (iris_batch_references(batch->other_batches[b], bo))
         iris_batch_flush(batch->other_batches[b]);
   }

   /* Now, take a reference and add it to the validation list. */
   iris_bo_reference(bo);

   if (batch->exec_count == batch->exec_array_size) {
      batch->exec_array_size *= 2;
      batch->exec_bos =
         realloc(batch->exec_bos,
                 batch->exec_array_size * sizeof(batch->exec_bos[0]));
      batch->validation_list =
         realloc(batch->validation_list,
                 batch->exec_array_size * sizeof(batch->validation_list[0]));
   }

   batch->validation_list[batch->exec_count] =
      (struct drm_i915_gem_exec_object2) {
         .handle = bo->gem_handle,
         .offset = bo->gtt_offset,
         .flags = bo->kflags | (writable ? EXEC_OBJECT_WRITE : 0),
      };

   bo->index = batch->exec_count;
   batch->exec_bos[batch->exec_count] = bo;
   batch->aperture_space += bo->size;

   batch->exec_count++;
}

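/**
 * Allocate a new command buffer BO, mark it for error-state capture, map it,
 * and add it to the validation list as this batch's current buffer.
 */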
static void
create_batch(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   batch->bo = iris_bo_alloc(bufmgr, "command buffer",
                             BATCH_SZ + BATCH_RESERVED, IRIS_MEMZONE_OTHER);
   batch->bo->kflags |= EXEC_OBJECT_CAPTURE;
   batch->map = iris_bo_map(NULL, batch->bo, MAP_READ | MAP_WRITE);
   batch->map_next = batch->map;

   iris_use_pinned_bo(batch, batch->bo, false);
}

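/**
 * Reset the batch for reuse: stash the old command buffer as last_bo,
 * allocate a fresh one, and clear the per-batch cache and debug tracking.
 */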
static void
iris_batch_reset(struct iris_batch *batch)
{
   if (batch->last_bo != NULL) {
      iris_bo_unreference(batch->last_bo);
      batch->last_bo = NULL;
   }
   batch->last_bo = batch->bo;
   batch->primary_batch_size = 0;
   batch->contains_draw = false;

   create_batch(batch);
   assert(batch->bo->index == 0);

   if (batch->state_sizes)
      _mesa_hash_table_clear(batch->state_sizes, NULL);

   iris_cache_sets_clear(batch);
}

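/**
 * Free a batch: drop all validation list references, release the command
 * buffers, destroy the hardware context, and tear down the cache sets and
 * (if enabled) the state-size table and batch decoder.
 */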
void
iris_batch_free(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   for (int i = 0; i < batch->exec_count; i++) {
      iris_bo_unreference(batch->exec_bos[i]);
   }
   free(batch->exec_bos);
   free(batch->validation_list);
   iris_bo_unreference(batch->bo);
   batch->bo = NULL;
   batch->map = NULL;
   batch->map_next = NULL;

   iris_bo_unreference(batch->last_bo);

   iris_destroy_hw_context(bufmgr, batch->hw_ctx_id);

   _mesa_hash_table_destroy(batch->cache.render, NULL);
   _mesa_set_destroy(batch->cache.depth, NULL);

   if (batch->state_sizes) {
      _mesa_hash_table_destroy(batch->state_sizes, NULL);
      gen_batch_decode_ctx_finish(&batch->decoder);
   }
}

/**
 * If we've chained to a secondary batch, or are getting near to the end,
 * then flush. This should only be called between draws.
 */
void
iris_batch_maybe_flush(struct iris_batch *batch, unsigned estimate)
{
   if (batch->bo != batch->exec_bos[0] ||
       iris_batch_bytes_used(batch) + estimate >= BATCH_SZ) {
      iris_batch_flush(batch);
   }
}

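/**
 * Chain to a new batch buffer: reserve room for an MI_BATCH_BUFFER_START,
 * record the primary batch's size, allocate a fresh command buffer, and
 * point the reserved command at it so execution continues there.
 */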
void
iris_chain_to_new_batch(struct iris_batch *batch)
{
   /* We only support chaining a single time. */
   assert(batch->bo == batch->exec_bos[0]);

   uint32_t *cmd = batch->map_next;
   uint64_t *addr = batch->map_next + 4;
   batch->map_next += 8;

   /* No longer held by batch->bo, still held by validation list */
   iris_bo_unreference(batch->bo);
   batch->primary_batch_size = iris_batch_bytes_used(batch);
   create_batch(batch);

   /* Emit MI_BATCH_BUFFER_START to chain to another batch. */
   *cmd = (0x31 << 23) | (1 << 8) | (3 - 2);
   *addr = batch->bo->gtt_offset;
}

/**
 * Terminate a batch with MI_BATCH_BUFFER_END.
 */
static void
iris_finish_batch(struct iris_batch *batch)
{
   // XXX: ISP DIS

   /* Emit MI_BATCH_BUFFER_END to finish our batch. */
   uint32_t *map = batch->map_next;

   map[0] = (0xA << 23);

   batch->map_next += 4;

   if (batch->bo == batch->exec_bos[0])
      batch->primary_batch_size = iris_batch_bytes_used(batch);
}

/**
 * Submit the batch to the GPU via execbuffer2.
 */
static int
submit_batch(struct iris_batch *batch, int in_fence_fd, int *out_fence_fd)
{
   iris_bo_unmap(batch->bo);

   /* The requirements for using I915_EXEC_NO_RELOC are:
    *
    *   The addresses written in the objects must match the corresponding
    *   reloc.gtt_offset which in turn must match the corresponding
    *   execobject.offset.
    *
    *   Any render targets written to in the batch must be flagged with
    *   EXEC_OBJECT_WRITE.
    *
    *   To avoid stalling, execobject.offset should match the current
    *   address of that object within the active context.
    */
   struct drm_i915_gem_execbuffer2 execbuf = {
      .buffers_ptr = (uintptr_t) batch->validation_list,
      .buffer_count = batch->exec_count,
      .batch_start_offset = 0,
      /* This must be QWord aligned. */
      .batch_len = ALIGN(batch->primary_batch_size, 8),
      .flags = batch->engine |
               I915_EXEC_NO_RELOC |
               I915_EXEC_BATCH_FIRST |
               I915_EXEC_HANDLE_LUT,
      .rsvd1 = batch->hw_ctx_id, /* rsvd1 is actually the context ID */
   };

   unsigned long cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2;

   if (in_fence_fd != -1) {
      execbuf.rsvd2 = in_fence_fd;
      execbuf.flags |= I915_EXEC_FENCE_IN;
   }

   if (out_fence_fd != NULL) {
      cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2_WR;
      *out_fence_fd = -1;
      execbuf.flags |= I915_EXEC_FENCE_OUT;
   }

   int ret = drm_ioctl(batch->screen->fd, cmd, &execbuf);
   if (ret != 0) {
      ret = -errno;
      DBG("execbuf FAILED: errno = %d\n", -ret);
      fprintf(stderr, "execbuf FAILED: errno = %d\n", -ret);
      abort();
   } else {
      DBG("execbuf succeeded\n");
   }

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];

      bo->idle = false;
      bo->index = -1;
   }

   if (ret == 0 && out_fence_fd != NULL)
      *out_fence_fd = execbuf.rsvd2 >> 32;

   return ret;
}

/**
 * Flush the batch buffer, submitting it to the GPU and resetting it so
 * we're ready to emit the next batch.
 *
 * \param in_fence_fd is ignored if -1. Otherwise, this function takes
 * ownership of the fd.
 *
 * \param out_fence_fd is ignored if NULL. Otherwise, the caller must
 * take ownership of the returned fd.
 */
int
_iris_batch_flush_fence(struct iris_batch *batch,
                        int in_fence_fd, int *out_fence_fd,
                        const char *file, int line)
{
   if (iris_batch_bytes_used(batch) == 0)
      return 0;

   iris_finish_batch(batch);

   if (unlikely(INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT))) {
      int bytes_for_commands = iris_batch_bytes_used(batch);
      int second_bytes = 0;
      if (batch->bo != batch->exec_bos[0]) {
         second_bytes = bytes_for_commands;
         bytes_for_commands += batch->primary_batch_size;
      }
      fprintf(stderr, "%19s:%-3d: %s batch [%u] flush with %5d+%5db (%0.1f%%) "
              "(cmds), %4d BOs (%0.1fMb aperture)\n",
              file, line, batch->name, batch->hw_ctx_id,
              batch->primary_batch_size, second_bytes,
              100.0f * bytes_for_commands / BATCH_SZ,
              batch->exec_count,
              (float) batch->aperture_space / (1024 * 1024));
      dump_validation_list(batch);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
      decode_batch(batch);
   }

   int ret = submit_batch(batch, in_fence_fd, out_fence_fd);

   //throttle(iris);

   if (ret >= 0) {
      //if (iris->ctx.Const.ResetStrategy == GL_LOSE_CONTEXT_ON_RESET_ARB)
         //iris_check_for_reset(ice);

      if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
         dbg_printf("waiting for idle\n");
         iris_bo_wait_rendering(batch->bo);
      }
   } else {
#ifdef DEBUG
      const bool color = INTEL_DEBUG & DEBUG_COLOR;
      fprintf(stderr, "%siris: Failed to submit batchbuffer: %-80s%s\n",
              color ? "\e[1;41m" : "", strerror(-ret), color ? "\e[0m" : "");
      abort();
#endif
   }

   /* Clean up after the batch we submitted and prepare for a new one. */
   for (int i = 0; i < batch->exec_count; i++) {
      iris_bo_unreference(batch->exec_bos[i]);
      batch->exec_bos[i] = NULL;
   }
   batch->exec_count = 0;
   batch->aperture_space = 0;

   /* Start a new batch buffer. */
   iris_batch_reset(batch);

   return 0;
}

/**
 * Does the current batch refer to the given BO?
 *
 * (In other words, is the BO in the current batch's validation list?)
 */
bool
iris_batch_references(struct iris_batch *batch, struct iris_bo *bo)
{
   return find_validation_entry(batch, bo) != NULL;
}