iris: Move get_command_space to iris_batch.c
src/gallium/drivers/iris/iris_batch.c
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"

#include "drm-uapi/i915_drm.h"

#include "util/hash_table.h"
#include "main/macros.h"

#include <errno.h>
#include <xf86drm.h>

#define FILE_DEBUG_FLAG DEBUG_BUFMGR

/**
 * Target sizes of the batch and state buffers. We create the initial
 * buffers at these sizes, and flush when they're nearly full. If we
 * underestimate how close we are to the end, and suddenly need more space
 * in the middle of a draw, we can grow the buffers, and finish the draw.
 * At that point, we'll be over our target size, so the next operation
 * should flush. Each time we flush the batch, we recreate both buffers
 * at the original target size, so it doesn't grow without bound.
 */
#define BATCH_SZ (20 * 1024)
#define STATE_SZ (18 * 1024)

static void decode_batch(struct iris_batch *batch);

static void
iris_batch_reset(struct iris_batch *batch);

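/**
 * Debugging aid: print the current validation list (the BOs that will be
 * passed to execbuf), including each buffer's GEM handle, name, flags,
 * and presumed address.  Called when batch/submit debug output is enabled.
 */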
UNUSED static void
dump_validation_list(struct iris_batch *batch)
{
   fprintf(stderr, "Validation list (length %d):\n", batch->exec_count);

   for (int i = 0; i < batch->exec_count; i++) {
      uint64_t flags = batch->validation_list[i].flags;
      assert(batch->validation_list[i].handle ==
             batch->exec_bos[i]->gem_handle);
      fprintf(stderr, "[%2d]: %2d %-14s %p %s%-7s @ 0x%016llx (%"PRIu64"B)\n",
              i,
              batch->validation_list[i].handle,
              batch->exec_bos[i]->name,
              batch->exec_bos[i],
              (flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS) ? "(48b" : "(32b",
              (flags & EXEC_OBJECT_WRITE) ? " write)" : ")",
              batch->validation_list[i].offset,
              batch->exec_bos[i]->size);
   }
}

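/**
 * Callback for the batch decoder: given a virtual address, find the BO in
 * the validation list that contains it and return a mapping of it, so the
 * decoder can read the data at that address.
 */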
static struct gen_batch_decode_bo
decode_get_bo(void *v_batch, uint64_t address)
{
   struct iris_batch *batch = v_batch;

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];
      /* The decoder zeroes out the top 16 bits, so we need to as well */
      uint64_t bo_address = bo->gtt_offset & (~0ull >> 16);

      if (address >= bo_address && address < bo_address + bo->size) {
         return (struct gen_batch_decode_bo) {
            .addr = address,
            .size = bo->size,
            .map = iris_bo_map(batch->dbg, bo, MAP_READ) +
                   (address - bo_address),
         };
      }
   }

   return (struct gen_batch_decode_bo) { };
}

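/**
 * Trivial hash table callbacks for batch->state_sizes, which uses the
 * integer key itself (cast to a pointer) as both the key and the hash.
 */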
static bool
uint_key_compare(const void *a, const void *b)
{
   return a == b;
}

static uint32_t
uint_key_hash(const void *key)
{
   return (uintptr_t) key;
}

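/**
 * Allocate a new BO for a batch buffer, flag it for inclusion in error
 * states (EXEC_OBJECT_CAPTURE), and map it so commands can be written in.
 */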
static void
create_batch_buffer(struct iris_bufmgr *bufmgr,
                    struct iris_batch_buffer *buf,
                    const char *name, unsigned size)
{
   buf->bo = iris_bo_alloc(bufmgr, name, size, IRIS_MEMZONE_OTHER);
   buf->bo->kflags |= EXEC_OBJECT_CAPTURE;
   buf->map = iris_bo_map(NULL, buf->bo, MAP_READ | MAP_WRITE);
   buf->map_next = buf->map;
}

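/**
 * Initialize an iris_batch: set up the validation list and, when INTEL_DEBUG
 * is enabled, the state size table and batch decoder, then reset the batch
 * so it is ready to receive commands.
 */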
void
iris_init_batch(struct iris_batch *batch,
                struct iris_screen *screen,
                struct pipe_debug_callback *dbg,
                uint8_t ring)
{
   batch->screen = screen;
   batch->dbg = dbg;

   /* ring should be one of I915_EXEC_RENDER, I915_EXEC_BLT, etc. */
   assert((ring & ~I915_EXEC_RING_MASK) == 0);
   assert(util_bitcount(ring) == 1);
   batch->ring = ring;

   batch->exec_count = 0;
   batch->exec_array_size = 100;
   batch->exec_bos =
      malloc(batch->exec_array_size * sizeof(batch->exec_bos[0]));
   batch->validation_list =
      malloc(batch->exec_array_size * sizeof(batch->validation_list[0]));

   if (unlikely(INTEL_DEBUG)) {
      batch->state_sizes =
         _mesa_hash_table_create(NULL, uint_key_hash, uint_key_compare);

      const unsigned decode_flags =
         GEN_BATCH_DECODE_FULL |
         ((INTEL_DEBUG & DEBUG_COLOR) ? GEN_BATCH_DECODE_IN_COLOR : 0) |
         GEN_BATCH_DECODE_OFFSETS |
         GEN_BATCH_DECODE_FLOATS;

      gen_batch_decode_ctx_init(&batch->decoder, &screen->devinfo,
                                stderr, decode_flags, NULL,
                                decode_get_bo, NULL, batch);
   }

   iris_batch_reset(batch);
}

#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

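/**
 * Add a BO to the current batch's validation list, returning its index.
 *
 * If the BO is already in the list, the existing index is returned.  The
 * list (and the parallel exec_bos array) is grown as needed, a reference
 * is taken on the BO, and its size is added to the aperture estimate.
 */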
static unsigned
add_exec_bo(struct iris_batch *batch, struct iris_bo *bo)
{
   unsigned index = READ_ONCE(bo->index);

   if (index < batch->exec_count && batch->exec_bos[index] == bo)
      return index;

   /* May have been shared between multiple active batches */
   for (index = 0; index < batch->exec_count; index++) {
      if (batch->exec_bos[index] == bo)
         return index;
   }

   iris_bo_reference(bo);

   if (batch->exec_count == batch->exec_array_size) {
      batch->exec_array_size *= 2;
      batch->exec_bos =
         realloc(batch->exec_bos,
                 batch->exec_array_size * sizeof(batch->exec_bos[0]));
      batch->validation_list =
         realloc(batch->validation_list,
                 batch->exec_array_size * sizeof(batch->validation_list[0]));
   }

   batch->validation_list[batch->exec_count] =
      (struct drm_i915_gem_exec_object2) {
         .handle = bo->gem_handle,
         .offset = bo->gtt_offset,
         .flags = bo->kflags,
      };

   bo->index = batch->exec_count;
   batch->exec_bos[batch->exec_count] = bo;
   batch->aperture_space += bo->size;

   return batch->exec_count++;
}

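/**
 * Reset the batch for a new command buffer: release any previous
 * last_cmd_bo reference, remember the outgoing command BO as last_cmd_bo,
 * allocate a fresh command buffer, add it as the first entry in the
 * validation list, and clear the decoder's state size table.
 */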
static void
iris_batch_reset(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   if (batch->last_cmd_bo != NULL) {
      iris_bo_unreference(batch->last_cmd_bo);
      batch->last_cmd_bo = NULL;
   }
   batch->last_cmd_bo = batch->cmdbuf.bo;

   create_batch_buffer(bufmgr, &batch->cmdbuf, "command buffer", BATCH_SZ);

   add_exec_bo(batch, batch->cmdbuf.bo);
   assert(batch->cmdbuf.bo->index == 0);

   if (batch->state_sizes)
      _mesa_hash_table_clear(batch->state_sizes, NULL);
}

static void
iris_batch_reset_and_clear_render_cache(struct iris_batch *batch)
{
   iris_batch_reset(batch);
   // XXX: iris_render_cache_set_clear(batch);
}

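/**
 * Release a batch buffer's BO and clear its map pointers.
 */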
static void
free_batch_buffer(struct iris_batch_buffer *buf)
{
   iris_bo_unreference(buf->bo);
   buf->bo = NULL;
   buf->map = NULL;
   buf->map_next = NULL;
}

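/**
 * Tear down an iris_batch: drop all validation list references, free the
 * command buffer and the saved last_cmd_bo, and destroy the debug decoder
 * state if it was created.
 */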
void
iris_batch_free(struct iris_batch *batch)
{
   for (int i = 0; i < batch->exec_count; i++) {
      iris_bo_unreference(batch->exec_bos[i]);
   }
   free(batch->exec_bos);
   free(batch->validation_list);
   free_batch_buffer(&batch->cmdbuf);

   iris_bo_unreference(batch->last_cmd_bo);

   if (batch->state_sizes) {
      _mesa_hash_table_destroy(batch->state_sizes, NULL);
      gen_batch_decode_ctx_finish(&batch->decoder);
   }
}

/**
 * Finish copying the old batch/state buffer's contents to the new one
 * after we tried to "grow" the buffer in an earlier operation.
 */
static void
finish_growing_bos(struct iris_batch_buffer *buf)
{
   struct iris_bo *old_bo = buf->partial_bo;
   if (!old_bo)
      return;

   void *old_map = old_bo->map_cpu ? old_bo->map_cpu : old_bo->map_wc;
   memcpy(buf->map, old_map, buf->partial_bytes);

   buf->partial_bo = NULL;
   buf->partial_bytes = 0;

   iris_bo_unreference(old_bo);
}

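/**
 * Return the number of bytes already written into a batch buffer.
 */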
static unsigned
buffer_bytes_used(struct iris_batch_buffer *buf)
{
   return buf->map_next - buf->map;
}

/**
 * Grow either the batch or state buffer to a new larger size.
 *
 * We can't actually grow buffers, so we allocate a new one, copy over
 * the existing contents, and update our lists to refer to the new one.
 *
 * Note that this is only temporary - each new batch recreates the buffers
 * at their original target size (BATCH_SZ or STATE_SZ).
 */
static void
grow_buffer(struct iris_batch *batch,
            struct iris_batch_buffer *buf,
            unsigned new_size)
{
   struct iris_bufmgr *bufmgr = batch->screen->bufmgr;
   struct iris_bo *bo = buf->bo;

   perf_debug(batch->dbg, "Growing %s - ran out of space\n", bo->name);

   if (buf->partial_bo) {
      /* We've already grown once, and now we need to do it again.
       * Finish our last grow operation so we can start a new one.
       * This should basically never happen.
       */
      perf_debug(batch->dbg, "Had to grow multiple times");
      finish_growing_bos(buf);
   }

   const unsigned existing_bytes = buffer_bytes_used(buf);

   struct iris_bo *new_bo =
      iris_bo_alloc(bufmgr, bo->name, new_size, IRIS_MEMZONE_OTHER);

   buf->map = iris_bo_map(NULL, new_bo, MAP_READ | MAP_WRITE);
   buf->map_next = buf->map + existing_bytes;

   /* Try to put the new BO at the same GTT offset as the old BO (which
    * we're throwing away, so it doesn't need to be there).
    *
    * This guarantees that our relocations continue to work: values we've
    * already written into the buffer, values we're going to write into the
    * buffer, and the validation/relocation lists all will match.
    *
    * Also preserve kflags for EXEC_OBJECT_CAPTURE.
    */
   new_bo->gtt_offset = bo->gtt_offset;
   new_bo->index = bo->index;
   new_bo->kflags = bo->kflags;

   /* Batch/state buffers are per-context, and if we've run out of space,
    * we must have actually used them before, so...they will be in the list.
    */
   assert(bo->index < batch->exec_count);
   assert(batch->exec_bos[bo->index] == bo);

   /* Update the validation list to use the new BO. */
   batch->exec_bos[bo->index] = new_bo;
   batch->validation_list[bo->index].handle = new_bo->gem_handle;

   /* Exchange the two BOs...without breaking pointers to the old BO.
    *
    * Consider this scenario:
    *
    * 1. Somebody calls iris_state_batch() to get a region of memory, and
    *    then creates an iris_address pointing to iris->batch.state.bo.
    * 2. They then call iris_state_batch() a second time, which happens to
    *    grow and replace the state buffer. They then try to emit a
    *    relocation to their first section of memory.
    *
    * If we replace the iris->batch.state.bo pointer at step 2, we would
    * break the address created in step 1. They'd have a pointer to the
    * old destroyed BO. Emitting a relocation would add this dead BO to
    * the validation list...causing /both/ statebuffers to be in the list,
    * and all kinds of disasters.
    *
    * This is not a contrived case - BLORP vertex data upload hits this.
    *
    * There are worse scenarios too. Fences for GL sync objects reference
    * iris->batch.batch.bo. If we replaced the batch pointer when growing,
    * we'd need to chase down every fence and update it to point to the
    * new BO. Otherwise, it would refer to a "batch" that never actually
    * gets submitted, and would fail to trigger.
    *
    * To work around both of these issues, we transmute the buffers in
    * place, making the existing struct iris_bo represent the new buffer,
    * and "new_bo" represent the old BO. This is highly unusual, but it
    * seems like a necessary evil.
    *
    * We also defer the memcpy of the existing batch's contents. Callers
    * may make multiple iris_state_batch calls, and retain pointers to the
    * old BO's map. We'll perform the memcpy in finish_growing_bos() when
    * we finally submit the batch, at which point we've finished uploading
    * state, and nobody should have any old references anymore.
    *
    * To do that, we keep a reference to the old BO in buf->partial_bo,
    * and store the number of bytes to copy in buf->partial_bytes. We
    * can monkey with the refcounts directly without atomics because these
    * are per-context BOs and they can only be touched by this thread.
    */
   assert(new_bo->refcount == 1);
   new_bo->refcount = bo->refcount;
   bo->refcount = 1;

   struct iris_bo tmp;
   memcpy(&tmp, bo, sizeof(struct iris_bo));
   memcpy(bo, new_bo, sizeof(struct iris_bo));
   memcpy(new_bo, &tmp, sizeof(struct iris_bo));

   buf->partial_bo = new_bo; /* the one reference of the OLD bo */
   buf->partial_bytes = existing_bytes;
}

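/**
 * Ensure a batch buffer has at least `size` bytes of space left.
 *
 * If the required bytes cross the flush threshold (and wrapping is
 * allowed), the batch is flushed instead.  Otherwise, if the buffer is
 * actually out of space, it is grown by half its current size, capped
 * at max_buffer_size.
 */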
static void
require_buffer_space(struct iris_batch *batch,
                     struct iris_batch_buffer *buf,
                     unsigned size,
                     unsigned flush_threshold,
                     unsigned max_buffer_size)
{
   const unsigned required_bytes = buffer_bytes_used(buf) + size;

   if (!batch->no_wrap && required_bytes >= flush_threshold) {
      iris_batch_flush(batch);
   } else if (required_bytes >= buf->bo->size) {
      grow_buffer(batch, buf,
                  MIN2(buf->bo->size + buf->bo->size / 2, max_buffer_size));
      assert(required_bytes < buf->bo->size);
   }
}

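/**
 * Reserve space in the command buffer, flushing or growing it if needed
 * (see require_buffer_space).
 */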
void
iris_require_command_space(struct iris_batch *batch, unsigned size)
{
   require_buffer_space(batch, &batch->cmdbuf, size, BATCH_SZ, MAX_BATCH_SIZE);
}

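/**
 * Reserve `bytes` of space in the command buffer and return a pointer to
 * the start of that space, advancing map_next past it so the caller can
 * write commands there.
 */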
void *
iris_get_command_space(struct iris_batch *batch, unsigned bytes)
{
   iris_require_command_space(batch, bytes);
   void *map = batch->cmdbuf.map_next;
   batch->cmdbuf.map_next += bytes;
   return map;
}

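/**
 * Copy pre-packed command data into the command buffer, reserving space
 * for it first via iris_get_command_space().
 */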
void
iris_batch_emit(struct iris_batch *batch, const void *data, unsigned size)
{
   void *map = iris_get_command_space(batch, size);
   memcpy(map, data, size);
}

/**
 * Called from iris_batch_flush before emitting MI_BATCH_BUFFER_END and
 * sending it off.
 *
 * This function can emit state (say, to preserve registers that aren't saved
 * between batches).
 */
static void
iris_finish_batch(struct iris_batch *batch)
{
   batch->no_wrap = true;

   // XXX: ISP DIS

   /* Emit MI_BATCH_BUFFER_END to finish our batch. Note that execbuf2
    * requires our batch size to be QWord aligned, so we pad it out if
    * necessary by emitting an extra MI_NOOP after the end.
    */
   const uint32_t MI_BATCH_BUFFER_END_AND_NOOP[2] = { (0xA << 23), 0 };
   const bool qword_aligned = (buffer_bytes_used(&batch->cmdbuf) % 8) == 0;
   iris_batch_emit(batch, MI_BATCH_BUFFER_END_AND_NOOP, qword_aligned ? 8 : 4);

   batch->no_wrap = false;
}

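/**
 * Submit the batch to the kernel via DRM_IOCTL_I915_GEM_EXECBUFFER2.
 *
 * Unmaps the command BO, fills out the execbuffer2 struct (using NO_RELOC,
 * BATCH_FIRST, and HANDLE_LUT), wires up the optional in/out fence fds,
 * and marks every BO in the validation list as busy afterwards.
 */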
static int
submit_batch(struct iris_batch *batch, int in_fence_fd, int *out_fence_fd)
{
   iris_bo_unmap(batch->cmdbuf.bo);

   /* The requirements for using I915_EXEC_NO_RELOC are:
    *
    *   The addresses written in the objects must match the corresponding
    *   reloc.gtt_offset which in turn must match the corresponding
    *   execobject.offset.
    *
    *   Any render targets written to in the batch must be flagged with
    *   EXEC_OBJECT_WRITE.
    *
    *   To avoid stalling, execobject.offset should match the current
    *   address of that object within the active context.
    */
   struct drm_i915_gem_execbuffer2 execbuf = {
      .buffers_ptr = (uintptr_t) batch->validation_list,
      .buffer_count = batch->exec_count,
      .batch_start_offset = 0,
      .batch_len = buffer_bytes_used(&batch->cmdbuf),
      .flags = batch->ring |
               I915_EXEC_NO_RELOC |
               I915_EXEC_BATCH_FIRST |
               I915_EXEC_HANDLE_LUT,
      .rsvd1 = batch->hw_ctx_id, /* rsvd1 is actually the context ID */
   };

   unsigned long cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2;

   if (in_fence_fd != -1) {
      execbuf.rsvd2 = in_fence_fd;
      execbuf.flags |= I915_EXEC_FENCE_IN;
   }

   if (out_fence_fd != NULL) {
      cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2_WR;
      *out_fence_fd = -1;
      execbuf.flags |= I915_EXEC_FENCE_OUT;
   }

   int ret = drm_ioctl(batch->screen->fd, cmd, &execbuf);
   if (ret != 0) {
      ret = -errno;
      DBG("execbuf FAILED: errno = %d\n", -ret);
   } else {
      DBG("execbuf succeeded\n");
   }

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];

      bo->idle = false;
      bo->index = -1;
   }

   if (ret == 0 && out_fence_fd != NULL)
      *out_fence_fd = execbuf.rsvd2 >> 32;

   return ret;
}

/**
 * The in_fence_fd is ignored if -1. Otherwise this function takes ownership
 * of the fd.
 *
 * The out_fence_fd is ignored if NULL. Otherwise, the caller takes ownership
 * of the returned fd.
 */
int
_iris_batch_flush_fence(struct iris_batch *batch,
                        int in_fence_fd, int *out_fence_fd,
                        const char *file, int line)
{
   if (buffer_bytes_used(&batch->cmdbuf) == 0)
      return 0;

   /* Check that we didn't just wrap our batchbuffer at a bad time. */
   assert(!batch->no_wrap);

   iris_finish_batch(batch);

   if (unlikely(INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT))) {
      int bytes_for_commands = buffer_bytes_used(&batch->cmdbuf);
      fprintf(stderr, "%19s:%-3d: Batchbuffer flush with %5db (%0.1f%%), "
              "%4d BOs (%0.1fMb aperture)\n",
              file, line,
              bytes_for_commands, 100.0f * bytes_for_commands / BATCH_SZ,
              batch->exec_count,
              (float) batch->aperture_space / (1024 * 1024));
      dump_validation_list(batch);
   }

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
      decode_batch(batch);

   int ret = submit_batch(batch, in_fence_fd, out_fence_fd);

   //throttle(iris);

   if (ret < 0)
      return ret;

   //if (iris->ctx.Const.ResetStrategy == GL_LOSE_CONTEXT_ON_RESET_ARB)
   //iris_check_for_reset(ice);

   if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
      dbg_printf("waiting for idle\n");
      iris_bo_wait_rendering(batch->cmdbuf.bo);
   }

   /* Clean up after the batch we submitted and prepare for a new one. */
   for (int i = 0; i < batch->exec_count; i++) {
      iris_bo_unreference(batch->exec_bos[i]);
      batch->exec_bos[i] = NULL;
   }
   batch->exec_count = 0;
   batch->aperture_space = 0;

   /* Start a new batch buffer. */
   iris_batch_reset_and_clear_render_cache(batch);

   return 0;
}

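/**
 * Return true if the given BO is present in the batch's validation list.
 */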
bool
iris_batch_references(struct iris_batch *batch, struct iris_bo *bo)
{
   unsigned index = READ_ONCE(bo->index);
   if (index < batch->exec_count && batch->exec_bos[index] == bo)
      return true;

   for (int i = 0; i < batch->exec_count; i++) {
      if (batch->exec_bos[i] == bo)
         return true;
   }
   return false;
}

/* This is the only way buffers get added to the validate list.
 */
void
iris_use_pinned_bo(struct iris_batch *batch,
                   struct iris_bo *bo,
                   bool writable)
{
   assert(bo->kflags & EXEC_OBJECT_PINNED);
   unsigned index = add_exec_bo(batch, bo);
   if (writable)
      batch->validation_list[index].flags |= EXEC_OBJECT_WRITE;
}

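/**
 * Print a decoded dump of the current command buffer contents, using the
 * batch decoder set up in iris_init_batch (called under DEBUG_BATCH).
 */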
static void
decode_batch(struct iris_batch *batch)
{
   gen_print_batch(&batch->decoder, batch->cmdbuf.map,
                   buffer_bytes_used(&batch->cmdbuf),
                   batch->cmdbuf.bo->gtt_offset);
}