/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

/**
 * @file iris_batch.c
 *
 * Batchbuffer and command submission module.
 *
 * Every API draw call results in a number of GPU commands, which we
 * collect into a "batch buffer". Typically, many draw calls are grouped
 * into a single batch to amortize command submission overhead.
 *
 * We submit batches to the kernel using the I915_GEM_EXECBUFFER2 ioctl.
 * One critical piece of data is the "validation list", which contains a
 * list of the buffer objects (BOs) which the commands in the batch need.
 * The kernel will make sure these are resident and pinned at the correct
 * virtual memory address before executing our batch. If a BO is not in
 * the validation list, it effectively does not exist, so take care.
 */
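
/* Typical driver usage, as a rough sketch (the actual emit helpers live
 * in other files):
 *
 *    iris_batch_maybe_flush(batch, estimate);  // flush if low on space
 *    ... write state and draw commands at batch->map_next ...
 *    iris_batch_flush(batch);                  // submit via execbuf2
 */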

#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"

#include "drm-uapi/i915_drm.h"

#include "util/hash_table.h"
#include "util/set.h"
#include "main/macros.h"

#include <errno.h>
#include <xf86drm.h>

#define FILE_DEBUG_FLAG DEBUG_BUFMGR

/* Terminating the batch takes either 4 bytes for MI_BATCH_BUFFER_END
 * or 12 bytes for MI_BATCH_BUFFER_START (when chaining). Plus, we may
 * need an extra 4 bytes to pad out to the nearest QWord. So reserve 16.
 */
#define BATCH_RESERVED 16

static void
iris_batch_reset(struct iris_batch *batch);

/**
 * Debugging code to dump the validation list, used by INTEL_DEBUG=submit.
 */
static void
dump_validation_list(struct iris_batch *batch)
{
   fprintf(stderr, "Validation list (length %d):\n", batch->exec_count);

   for (int i = 0; i < batch->exec_count; i++) {
      uint64_t flags = batch->validation_list[i].flags;
      assert(batch->validation_list[i].handle ==
             batch->exec_bos[i]->gem_handle);
      fprintf(stderr, "[%2d]: %2d %-14s %p %-7s @ 0x%016llx (%"PRIu64"B) - %d refs\n",
              i,
              batch->validation_list[i].handle,
              batch->exec_bos[i]->name,
              batch->exec_bos[i],
              (flags & EXEC_OBJECT_WRITE) ? "(write)" : "",
              batch->validation_list[i].offset,
              batch->exec_bos[i]->size,
              batch->exec_bos[i]->refcount);
   }
}

/**
 * Return BO information to the batch decoder (for debugging).
 */
static struct gen_batch_decode_bo
decode_get_bo(void *v_batch, uint64_t address)
{
   struct iris_batch *batch = v_batch;

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];
      /* The decoder zeroes out the top 16 bits, so we need to as well */
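      /* (Gen8+ addresses are 48 bits; bits 63:48 are only a sign
       * extension of bit 47, so masking them off here loses nothing.)
       */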
      uint64_t bo_address = bo->gtt_offset & (~0ull >> 16);

      if (address >= bo_address && address < bo_address + bo->size) {
         return (struct gen_batch_decode_bo) {
            .addr = address,
            .size = bo->size,
            .map = iris_bo_map(batch->dbg, bo, MAP_READ) +
                   (address - bo_address),
         };
      }
   }

   return (struct gen_batch_decode_bo) { };
}

/**
 * Decode the current batch.
 */
static void
decode_batch(struct iris_batch *batch)
{
   void *map = iris_bo_map(batch->dbg, batch->exec_bos[0], MAP_READ);
   gen_print_batch(&batch->decoder, map, batch->primary_batch_size,
                   batch->exec_bos[0]->gtt_offset);
}

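/* The state_sizes table keys are plain integers stored directly in the
 * pointer value rather than real pointers, so pointer identity is the
 * right equality test and the value itself serves as its own hash.
 */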
static bool
uint_key_compare(const void *a, const void *b)
{
   return a == b;
}

static uint32_t
uint_key_hash(const void *key)
{
   return (uintptr_t) key;
}

void
iris_init_batch(struct iris_batch *batch,
                struct iris_screen *screen,
                struct iris_vtable *vtbl,
                struct pipe_debug_callback *dbg,
                struct iris_batch **all_batches,
                const char *name,
                uint8_t engine)
{
   batch->screen = screen;
   batch->vtbl = vtbl;
   batch->dbg = dbg;
   batch->name = name;

   /* engine should be one of I915_EXEC_RENDER, I915_EXEC_BLT, etc. */
   assert((engine & ~I915_EXEC_RING_MASK) == 0);
   assert(util_bitcount(engine) == 1);
   batch->engine = engine;

   batch->hw_ctx_id = iris_create_hw_context(screen->bufmgr);
   assert(batch->hw_ctx_id);

   batch->exec_count = 0;
   batch->exec_array_size = 100;
   batch->exec_bos =
      malloc(batch->exec_array_size * sizeof(batch->exec_bos[0]));
   batch->validation_list =
      malloc(batch->exec_array_size * sizeof(batch->validation_list[0]));

   batch->cache.render = _mesa_hash_table_create(NULL, _mesa_hash_pointer,
                                                 _mesa_key_pointer_equal);
   batch->cache.depth = _mesa_set_create(NULL, _mesa_hash_pointer,
                                         _mesa_key_pointer_equal);

   memset(batch->other_batches, 0, sizeof(batch->other_batches));

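   /* Remember the other batches, so add_exec_bo() can flush any of them
    * that already reference a BO this batch is about to use.
    */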
   for (int i = 0, j = 0; i < IRIS_BATCH_COUNT; i++) {
      if (all_batches[i] != batch)
         batch->other_batches[j++] = all_batches[i];
   }

   if (unlikely(INTEL_DEBUG)) {
      batch->state_sizes =
         _mesa_hash_table_create(NULL, uint_key_hash, uint_key_compare);

      const unsigned decode_flags =
         GEN_BATCH_DECODE_FULL |
         ((INTEL_DEBUG & DEBUG_COLOR) ? GEN_BATCH_DECODE_IN_COLOR : 0) |
         GEN_BATCH_DECODE_OFFSETS |
         GEN_BATCH_DECODE_FLOATS;

      gen_batch_decode_ctx_init(&batch->decoder, &screen->devinfo,
                                stderr, decode_flags, NULL,
                                decode_get_bo, NULL, batch);
      batch->decoder.max_vbo_decoded_lines = 32;
   }

   iris_batch_reset(batch);
}

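/* Force a single volatile read: bo->index may be written concurrently by
 * another batch, so the compiler must not reload it and act on two
 * different values. The result is only a hint and is re-checked against
 * exec_bos[index] before being trusted.
 */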
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

static unsigned
add_exec_bo(struct iris_batch *batch, struct iris_bo *bo)
{
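   /* Fast path: bo->index caches the BO's most recent position in some
    * batch's exec list; verify it before trusting it.
    */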
   unsigned index = READ_ONCE(bo->index);

   if (index < batch->exec_count && batch->exec_bos[index] == bo)
      return index;

   /* May have been shared between multiple active batches */
   for (index = 0; index < batch->exec_count; index++) {
      if (batch->exec_bos[index] == bo)
         return index;
   }

   /* This is the first time our batch has seen this BO. Before we use it,
    * we need to see if other batches reference it - if so, we should flush
    * those first.
    */
   for (int b = 0; b < ARRAY_SIZE(batch->other_batches); b++) {
      // XXX: this is bad, we use the same state / instruction buffers for
      // both batches, and if both of them are reading some dynamic state,
      // we flush all the time. check for writes vs. reads?
      //
      // XXX: need to combine add_exec_bo and iris_use_pinned_bo so that
      // we know whether we're writing the buffer or not.
      if (iris_batch_references(batch->other_batches[b], bo))
         iris_batch_flush(batch->other_batches[b]);
   }

   /* Now, take a reference and add it to the validation list. */
   iris_bo_reference(bo);

   if (batch->exec_count == batch->exec_array_size) {
      batch->exec_array_size *= 2;
      batch->exec_bos =
         realloc(batch->exec_bos,
                 batch->exec_array_size * sizeof(batch->exec_bos[0]));
      batch->validation_list =
         realloc(batch->validation_list,
                 batch->exec_array_size * sizeof(batch->validation_list[0]));
   }

   batch->validation_list[batch->exec_count] =
      (struct drm_i915_gem_exec_object2) {
         .handle = bo->gem_handle,
         .offset = bo->gtt_offset,
         .flags = bo->kflags,
      };

   bo->index = batch->exec_count;
   batch->exec_bos[batch->exec_count] = bo;
   batch->aperture_space += bo->size;

   return batch->exec_count++;
}

static void
create_batch(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   batch->bo = iris_bo_alloc(bufmgr, "command buffer",
                             BATCH_SZ + BATCH_RESERVED, IRIS_MEMZONE_OTHER);
   batch->bo->kflags |= EXEC_OBJECT_CAPTURE;
   batch->map = iris_bo_map(NULL, batch->bo, MAP_READ | MAP_WRITE);
   batch->map_next = batch->map;

   add_exec_bo(batch, batch->bo);
}

static void
iris_batch_reset(struct iris_batch *batch)
{
   if (batch->last_bo != NULL) {
      iris_bo_unreference(batch->last_bo);
      batch->last_bo = NULL;
   }
   batch->last_bo = batch->bo;
   batch->primary_batch_size = 0;
   batch->contains_draw = false;

   create_batch(batch);
   assert(batch->bo->index == 0);

   if (batch->state_sizes)
      _mesa_hash_table_clear(batch->state_sizes, NULL);

   iris_cache_sets_clear(batch);
}

void
iris_batch_free(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   for (int i = 0; i < batch->exec_count; i++) {
      iris_bo_unreference(batch->exec_bos[i]);
   }
   free(batch->exec_bos);
   free(batch->validation_list);
   iris_bo_unreference(batch->bo);
   batch->bo = NULL;
   batch->map = NULL;
   batch->map_next = NULL;

   iris_bo_unreference(batch->last_bo);

   iris_destroy_hw_context(bufmgr, batch->hw_ctx_id);

   _mesa_hash_table_destroy(batch->cache.render, NULL);
   _mesa_set_destroy(batch->cache.depth, NULL);

   if (batch->state_sizes) {
      _mesa_hash_table_destroy(batch->state_sizes, NULL);
      gen_batch_decode_ctx_finish(&batch->decoder);
   }
}

/**
 * If we've chained to a secondary batch, or are getting near to the end,
 * then flush. This should only be called between draws.
 */
void
iris_batch_maybe_flush(struct iris_batch *batch, unsigned estimate)
{
   if (batch->bo != batch->exec_bos[0] ||
       iris_batch_bytes_used(batch) + estimate >= BATCH_SZ) {
      iris_batch_flush(batch);
   }
}

void
iris_chain_to_new_batch(struct iris_batch *batch)
{
   /* We only support chaining a single time. */
   assert(batch->bo == batch->exec_bos[0]);

   uint32_t *cmd = batch->map_next;
   uint64_t *addr = batch->map_next + 4;
   batch->map_next += 12;

   /* No longer held by batch->bo, still held by validation list */
   iris_bo_unreference(batch->bo);
   batch->primary_batch_size = iris_batch_bytes_used(batch);
   create_batch(batch);

   /* Emit MI_BATCH_BUFFER_START to chain to another batch. */
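   /* DWord 0 is the opcode (0x31 in bits 31:23), bit 8 set to use the
    * PPGTT address space, and a DWord length of (3 - 2) for the
    * three-DWord, 48-bit-address form. DWords 1-2 hold the target address.
    */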
   *cmd = (0x31 << 23) | (1 << 8) | (3 - 2);
   *addr = batch->bo->gtt_offset;
}

/**
 * Terminate a batch with MI_BATCH_BUFFER_END.
 */
static void
iris_finish_batch(struct iris_batch *batch)
{
   // XXX: ISP DIS

   /* Emit MI_BATCH_BUFFER_END to finish our batch. */
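   /* (A single DWord, with the opcode 0x0A in bits 31:23.) */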
   uint32_t *map = batch->map_next;

   map[0] = (0xA << 23);

   batch->map_next += 4;

   if (batch->bo == batch->exec_bos[0])
      batch->primary_batch_size = iris_batch_bytes_used(batch);
}

/**
 * Submit the batch to the GPU via execbuffer2.
 */
static int
submit_batch(struct iris_batch *batch, int in_fence_fd, int *out_fence_fd)
{
   iris_bo_unmap(batch->bo);

   /* The requirements for using I915_EXEC_NO_RELOC are:
    *
    *   The addresses written in the objects must match the corresponding
    *   reloc.gtt_offset which in turn must match the corresponding
    *   execobject.offset.
    *
    *   Any render targets written to in the batch must be flagged with
    *   EXEC_OBJECT_WRITE.
    *
    *   To avoid stalling, execobject.offset should match the current
    *   address of that object within the active context.
    */
   struct drm_i915_gem_execbuffer2 execbuf = {
      .buffers_ptr = (uintptr_t) batch->validation_list,
      .buffer_count = batch->exec_count,
      .batch_start_offset = 0,
      /* This must be QWord aligned. */
      .batch_len = ALIGN(batch->primary_batch_size, 8),
      .flags = batch->engine |
               I915_EXEC_NO_RELOC |
               I915_EXEC_BATCH_FIRST |
               I915_EXEC_HANDLE_LUT,
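      /* I915_EXEC_BATCH_FIRST means the batch is validation_list[0]
       * (create_batch() adds it first) rather than the last element.
       */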
      .rsvd1 = batch->hw_ctx_id, /* rsvd1 is actually the context ID */
   };

   unsigned long cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2;

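   /* The kernel packs both fence FDs into rsvd2: the input fence goes in
    * the lower 32 bits, and the output fence comes back in the upper 32
    * bits (read below as execbuf.rsvd2 >> 32).
    */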
   if (in_fence_fd != -1) {
      execbuf.rsvd2 = in_fence_fd;
      execbuf.flags |= I915_EXEC_FENCE_IN;
   }

   if (out_fence_fd != NULL) {
      cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2_WR;
      *out_fence_fd = -1;
      execbuf.flags |= I915_EXEC_FENCE_OUT;
   }

   int ret = drm_ioctl(batch->screen->fd, cmd, &execbuf);
   if (ret != 0) {
      ret = -errno;
      DBG("execbuf FAILED: errno = %d\n", -ret);
      fprintf(stderr, "execbuf FAILED: errno = %d\n", -ret);
      abort();
   } else {
      DBG("execbuf succeeded\n");
   }

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];

      bo->idle = false;
      bo->index = -1;
   }

   if (ret == 0 && out_fence_fd != NULL)
      *out_fence_fd = execbuf.rsvd2 >> 32;

   return ret;
}

/**
 * Flush the batch buffer, submitting it to the GPU and resetting it so
 * we're ready to emit the next batch.
 *
 * \param in_fence_fd is ignored if -1. Otherwise, this function takes
 * ownership of the fd.
 *
 * \param out_fence_fd is ignored if NULL. Otherwise, the caller must
 * take ownership of the returned fd.
 */
int
_iris_batch_flush_fence(struct iris_batch *batch,
                        int in_fence_fd, int *out_fence_fd,
                        const char *file, int line)
{
   if (iris_batch_bytes_used(batch) == 0)
      return 0;

   iris_finish_batch(batch);

   if (unlikely(INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT))) {
      int bytes_for_commands = iris_batch_bytes_used(batch);
      int second_bytes = 0;
      if (batch->bo != batch->exec_bos[0]) {
         second_bytes = bytes_for_commands;
         bytes_for_commands += batch->primary_batch_size;
      }
      fprintf(stderr, "%19s:%-3d: %s batch [%u] flush with %5d+%5db (%0.1f%%) "
              "(cmds), %4d BOs (%0.1fMB aperture)\n",
              file, line, batch->name, batch->hw_ctx_id,
              batch->primary_batch_size, second_bytes,
              100.0f * bytes_for_commands / BATCH_SZ,
              batch->exec_count,
              (float) batch->aperture_space / (1024 * 1024));
      dump_validation_list(batch);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH)) {
      decode_batch(batch);
   }

   int ret = submit_batch(batch, in_fence_fd, out_fence_fd);

   //throttle(iris);

   if (ret >= 0) {
      //if (iris->ctx.Const.ResetStrategy == GL_LOSE_CONTEXT_ON_RESET_ARB)
         //iris_check_for_reset(ice);

      if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
         dbg_printf("waiting for idle\n");
         iris_bo_wait_rendering(batch->bo);
      }
   } else {
#ifdef DEBUG
      const bool color = INTEL_DEBUG & DEBUG_COLOR;
      fprintf(stderr, "%siris: Failed to submit batchbuffer: %-80s%s\n",
              color ? "\e[1;41m" : "", strerror(-ret), color ? "\e[0m" : "");
      abort();
#endif
   }

   /* Clean up after the batch we submitted and prepare for a new one. */
   for (int i = 0; i < batch->exec_count; i++) {
      iris_bo_unreference(batch->exec_bos[i]);
      batch->exec_bos[i] = NULL;
   }
   batch->exec_count = 0;
   batch->aperture_space = 0;

   /* Start a new batch buffer. */
   iris_batch_reset(batch);

   return 0;
}

/**
 * Does the current batch refer to the given BO?
 *
 * (In other words, is the BO in the current batch's validation list?)
 */
bool
iris_batch_references(struct iris_batch *batch, struct iris_bo *bo)
{
   unsigned index = READ_ONCE(bo->index);
   if (index < batch->exec_count && batch->exec_bos[index] == bo)
      return true;

   for (int i = 0; i < batch->exec_count; i++) {
      if (batch->exec_bos[i] == bo)
         return true;
   }
   return false;
}

/**
 * Add a buffer to the current batch's validation list.
 *
 * You must call this on any BO you wish to use in this batch, to ensure
 * that it's resident when the GPU commands execute.
 */
void
iris_use_pinned_bo(struct iris_batch *batch,
                   struct iris_bo *bo,
                   bool writable)
{
   assert(bo->kflags & EXEC_OBJECT_PINNED);
   unsigned index = add_exec_bo(batch, bo);
   if (writable)
      batch->validation_list[index].flags |= EXEC_OBJECT_WRITE;
}