iris: move MAX defines to iris_batch.h
[mesa.git] src/gallium/drivers/iris/iris_batch.c
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"

#include "drm-uapi/i915_drm.h"

#include "util/hash_table.h"
#include "main/macros.h"

#include <errno.h>
#include <xf86drm.h>

#define FILE_DEBUG_FLAG DEBUG_BUFMGR

/**
 * Target sizes of the batch and state buffers. We create the initial
 * buffers at these sizes, and flush when they're nearly full. If we
 * underestimate how close we are to the end, and suddenly need more space
 * in the middle of a draw, we can grow the buffers, and finish the draw.
 * At that point, we'll be over our target size, so the next operation
 * should flush. Each time we flush the batch, we recreate both buffers
 * at the original target size, so it doesn't grow without bound.
 */
#define BATCH_SZ (20 * 1024)
#define STATE_SZ (18 * 1024)

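/* The MAX_BATCH_SIZE and MAX_STATE_SIZE ceilings we may grow to are
 * defined in iris_batch.h.
 */
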
static void
iris_batch_reset(struct iris_batch *batch);

UNUSED static void
dump_validation_list(struct iris_batch *batch)
{
   fprintf(stderr, "Validation list (length %d):\n", batch->exec_count);

   for (int i = 0; i < batch->exec_count; i++) {
      assert(batch->validation_list[i].handle ==
             batch->exec_bos[i]->gem_handle);
      fprintf(stderr, "[%d] = %d %s %p\n", i,
              batch->validation_list[i].handle,
              batch->exec_bos[i]->name,
              batch->exec_bos[i]);
   }
}

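/* Trivial hash table callbacks for the state_sizes table, which is keyed
 * directly by state buffer offsets.
 */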
static bool
uint_key_compare(const void *a, const void *b)
{
   return a == b;
}

static uint32_t
uint_key_hash(const void *key)
{
   return (uintptr_t) key;
}

static void
init_reloc_list(struct iris_reloc_list *rlist, int count)
{
   rlist->reloc_count = 0;
   rlist->reloc_array_size = count;
   rlist->relocs = malloc(rlist->reloc_array_size *
                          sizeof(struct drm_i915_gem_relocation_entry));
}

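/* Allocate a new BO for the command or state buffer, ask the kernel to
 * capture its contents in GPU error states (EXEC_OBJECT_CAPTURE), and map
 * it for writing.
 */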
static void
create_batch_buffer(struct iris_bufmgr *bufmgr,
                    struct iris_batch_buffer *buf,
                    const char *name, unsigned size)
{
   buf->bo = iris_bo_alloc(bufmgr, name, size, 4096);
   buf->bo->kflags |= EXEC_OBJECT_CAPTURE;
   buf->map = iris_bo_map(NULL, buf->bo, MAP_READ | MAP_WRITE);
   buf->map_next = buf->map;
}

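/**
 * Initialize a batch for the given ring (I915_EXEC_RENDER, I915_EXEC_BLT,
 * and so on), setting up its relocation lists, validation list storage,
 * and initial buffers.
 */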
void
iris_init_batch(struct iris_batch *batch,
                struct iris_screen *screen,
                struct pipe_debug_callback *dbg,
                uint8_t ring)
{
   batch->screen = screen;
   batch->dbg = dbg;

   /* ring should be one of I915_EXEC_RENDER, I915_EXEC_BLT, etc. */
   assert((ring & ~I915_EXEC_RING_MASK) == 0);
   assert(util_bitcount(ring) == 1);
   batch->ring = ring;

   init_reloc_list(&batch->cmdbuf.relocs, 256);
   init_reloc_list(&batch->statebuf.relocs, 256);

   batch->exec_count = 0;
   batch->exec_array_size = 100;
   batch->exec_bos =
      malloc(batch->exec_array_size * sizeof(batch->exec_bos[0]));
   batch->validation_list =
      malloc(batch->exec_array_size * sizeof(batch->validation_list[0]));

   if (unlikely(INTEL_DEBUG)) {
      batch->state_sizes =
         _mesa_hash_table_create(NULL, uint_key_hash, uint_key_compare);
   }

   iris_batch_reset(batch);
}

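/* Force a single, volatile read of a value that may be updated behind our
 * back (see the bo->index handling in add_exec_bo, where a BO may be shared
 * between multiple active batches).
 */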
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))

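/**
 * Add a BO to the batch's validation list, returning its index.
 *
 * The cached bo->index is checked first; if it is stale (for example,
 * because the BO is shared with another batch), fall back to a linear
 * search before appending a new entry.
 */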
static unsigned
add_exec_bo(struct iris_batch *batch, struct iris_bo *bo)
{
   unsigned index = READ_ONCE(bo->index);

   if (index < batch->exec_count && batch->exec_bos[index] == bo)
      return index;

   /* May have been shared between multiple active batches */
   for (index = 0; index < batch->exec_count; index++) {
      if (batch->exec_bos[index] == bo)
         return index;
   }

   iris_bo_reference(bo);

   if (batch->exec_count == batch->exec_array_size) {
      batch->exec_array_size *= 2;
      batch->exec_bos =
         realloc(batch->exec_bos,
                 batch->exec_array_size * sizeof(batch->exec_bos[0]));
      batch->validation_list =
         realloc(batch->validation_list,
                 batch->exec_array_size * sizeof(batch->validation_list[0]));
   }

   batch->validation_list[batch->exec_count] =
      (struct drm_i915_gem_exec_object2) {
         .handle = bo->gem_handle,
         .alignment = bo->align,
         .offset = bo->gtt_offset,
         .flags = bo->kflags,
      };

   bo->index = batch->exec_count;
   batch->exec_bos[batch->exec_count] = bo;
   batch->aperture_space += bo->size;

   return batch->exec_count++;
}

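/**
 * Start a fresh batch: allocate new command and state buffers at their
 * target sizes, keep a reference to the previous command buffer in
 * last_cmd_bo, and add the new command buffer as validation list entry 0.
 */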
static void
iris_batch_reset(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   if (batch->last_cmd_bo != NULL) {
      iris_bo_unreference(batch->last_cmd_bo);
      batch->last_cmd_bo = NULL;
   }
   batch->last_cmd_bo = batch->cmdbuf.bo;

   create_batch_buffer(bufmgr, &batch->cmdbuf, "command buffer", BATCH_SZ);
   create_batch_buffer(bufmgr, &batch->statebuf, "state buffer", STATE_SZ);

   /* Avoid making 0 a valid state offset - otherwise the decoder will try
    * to decode data whenever we use offset 0 as a null pointer.
    */
   batch->statebuf.map_next += 1;

   add_exec_bo(batch, batch->cmdbuf.bo);
   assert(batch->cmdbuf.bo->index == 0);

   if (batch->state_sizes)
      _mesa_hash_table_clear(batch->state_sizes, NULL);
}

static void
iris_batch_reset_and_clear_render_cache(struct iris_batch *batch)
{
   iris_batch_reset(batch);
   // XXX: iris_render_cache_set_clear(batch);
}

static void
free_batch_buffer(struct iris_batch_buffer *buf)
{
   iris_bo_unreference(buf->bo);
   buf->bo = NULL;
   buf->map = NULL;
   buf->map_next = NULL;

   free(buf->relocs.relocs);
   buf->relocs.relocs = NULL;
   buf->relocs.reloc_array_size = 0;
}

void
iris_batch_free(struct iris_batch *batch)
{
   for (int i = 0; i < batch->exec_count; i++) {
      iris_bo_unreference(batch->exec_bos[i]);
   }
   free(batch->exec_bos);
   free(batch->validation_list);
   free_batch_buffer(&batch->cmdbuf);
   free_batch_buffer(&batch->statebuf);

   iris_bo_unreference(batch->last_cmd_bo);

   if (batch->state_sizes)
      _mesa_hash_table_destroy(batch->state_sizes, NULL);
}

/**
 * Finish copying the old batch/state buffer's contents to the new one
 * after we tried to "grow" the buffer in an earlier operation.
 */
static void
finish_growing_bos(struct iris_batch_buffer *buf)
{
   struct iris_bo *old_bo = buf->partial_bo;
   if (!old_bo)
      return;

   void *old_map = old_bo->map_cpu ? old_bo->map_cpu : old_bo->map_wc;
   memcpy(buf->map, old_map, buf->partial_bytes);

   buf->partial_bo = NULL;
   buf->partial_bytes = 0;

   iris_bo_unreference(old_bo);
}

static unsigned
buffer_bytes_used(struct iris_batch_buffer *buf)
{
   return buf->map_next - buf->map;
}

/**
 * Grow either the batch or state buffer to a new larger size.
 *
 * We can't actually grow buffers, so we allocate a new one, copy over
 * the existing contents, and update our lists to refer to the new one.
 *
 * Note that this is only temporary - each new batch recreates the buffers
 * at their original target size (BATCH_SZ or STATE_SZ).
 */
static void
grow_buffer(struct iris_batch *batch,
            struct iris_batch_buffer *buf,
            unsigned new_size)
{
   struct iris_bufmgr *bufmgr = batch->screen->bufmgr;
   struct iris_bo *bo = buf->bo;

   perf_debug(batch->dbg, "Growing %s - ran out of space\n", bo->name);

   if (buf->partial_bo) {
      /* We've already grown once, and now we need to do it again.
       * Finish our last grow operation so we can start a new one.
       * This should basically never happen.
       */
      perf_debug(batch->dbg, "Had to grow multiple times");
      finish_growing_bos(buf);
   }

   const unsigned existing_bytes = buffer_bytes_used(buf);

   struct iris_bo *new_bo =
      iris_bo_alloc(bufmgr, bo->name, new_size, bo->align);

   buf->map = iris_bo_map(NULL, new_bo, MAP_READ | MAP_WRITE);
   buf->map_next = buf->map + existing_bytes;

   /* Try to put the new BO at the same GTT offset as the old BO (which
    * we're throwing away, so it doesn't need to be there).
    *
    * This guarantees that our relocations continue to work: values we've
    * already written into the buffer, values we're going to write into the
    * buffer, and the validation/relocation lists all will match.
    *
    * Also preserve kflags for EXEC_OBJECT_CAPTURE.
    */
   new_bo->gtt_offset = bo->gtt_offset;
   new_bo->index = bo->index;
   new_bo->kflags = bo->kflags;

   /* Batch/state buffers are per-context, and if we've run out of space,
    * we must have actually used them before, so...they will be in the list.
    */
   assert(bo->index < batch->exec_count);
   assert(batch->exec_bos[bo->index] == bo);

   /* Update the validation list to use the new BO. */
   batch->exec_bos[bo->index] = new_bo;
   batch->validation_list[bo->index].handle = new_bo->gem_handle;

   /* Exchange the two BOs...without breaking pointers to the old BO.
    *
    * Consider this scenario:
    *
    * 1. Somebody calls iris_alloc_state() to get a region of memory, and
    *    then creates an address pointing to batch->statebuf.bo.
    * 2. They then call iris_alloc_state() a second time, which happens to
    *    grow and replace the state buffer. They then try to emit a
    *    relocation to their first section of memory.
    *
    * If we replace the batch->statebuf.bo pointer at step 2, we would
    * break the address created in step 1. They'd have a pointer to the
    * old destroyed BO. Emitting a relocation would add this dead BO to
    * the validation list...causing /both/ statebuffers to be in the list,
    * and all kinds of disasters.
    *
    * This is not a contrived case - BLORP vertex data upload hits this.
    *
    * There are worse scenarios too. Fences for GL sync objects reference
    * batch->cmdbuf.bo. If we replaced the batch pointer when growing,
    * we'd need to chase down every fence and update it to point to the
    * new BO. Otherwise, it would refer to a "batch" that never actually
    * gets submitted, and would fail to trigger.
    *
    * To work around both of these issues, we transmutate the buffers in
    * place, making the existing struct iris_bo represent the new buffer,
    * and "new_bo" represent the old BO. This is highly unusual, but it
    * seems like a necessary evil.
    *
    * We also defer the memcpy of the existing batch's contents. Callers
    * may make multiple iris_alloc_state() calls, and retain pointers to
    * the old BO's map. We'll perform the memcpy in finish_growing_bos()
    * when we finally submit the batch, at which point we've finished
    * uploading state, and nobody should have any old references anymore.
    *
    * To do that, we keep a reference to the old BO in buf->partial_bo,
    * and store the number of bytes to copy in buf->partial_bytes. We
    * can monkey with the refcounts directly without atomics because these
    * are per-context BOs and they can only be touched by this thread.
    */
   assert(new_bo->refcount == 1);
   new_bo->refcount = bo->refcount;
   bo->refcount = 1;

   struct iris_bo tmp;
   memcpy(&tmp, bo, sizeof(struct iris_bo));
   memcpy(bo, new_bo, sizeof(struct iris_bo));
   memcpy(new_bo, &tmp, sizeof(struct iris_bo));

   buf->partial_bo = new_bo; /* the one reference of the OLD bo */
   buf->partial_bytes = existing_bytes;
}

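/**
 * Make sure "size" more bytes will fit in the given buffer.
 *
 * If we'd cross the flush threshold (and wrapping is allowed), flush the
 * batch. Otherwise, if the BO itself would overflow, grow it by half its
 * current size, capped at max_buffer_size.
 */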
static void
require_buffer_space(struct iris_batch *batch,
                     struct iris_batch_buffer *buf,
                     unsigned size,
                     unsigned flush_threshold,
                     unsigned max_buffer_size)
{
   const unsigned required_bytes = buffer_bytes_used(buf) + size;

   if (!batch->no_wrap && required_bytes >= flush_threshold) {
      iris_batch_flush(batch);
   } else if (required_bytes >= buf->bo->size) {
      grow_buffer(batch, buf,
                  MIN2(buf->bo->size + buf->bo->size / 2, max_buffer_size));
      assert(required_bytes < buf->bo->size);
   }
}


void
iris_require_command_space(struct iris_batch *batch, unsigned size)
{
   require_buffer_space(batch, &batch->cmdbuf, size, BATCH_SZ, MAX_BATCH_SIZE);
}

/**
 * Reserve some space in the statebuffer, or flush.
 *
 * This is used to estimate when we're near the end of the batch,
 * so we can flush early.
 */
void
iris_require_state_space(struct iris_batch *batch, unsigned size)
{
   require_buffer_space(batch, &batch->statebuf, size, STATE_SZ,
                        MAX_STATE_SIZE);
}

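/**
 * Copy a block of command packets into the batch, making room first if
 * necessary.
 */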
void
iris_batch_emit(struct iris_batch *batch, const void *data, unsigned size)
{
   iris_require_command_space(batch, size);
   memcpy(batch->cmdbuf.map_next, data, size);
   batch->cmdbuf.map_next += size;
}

/**
 * Called from iris_batch_flush before emitting MI_BATCHBUFFER_END and
 * sending it off.
 *
 * This function can emit state (say, to preserve registers that aren't saved
 * between batches).
 */
static void
iris_finish_batch(struct iris_batch *batch)
{
   batch->no_wrap = true;

   /* Mark the end of the buffer. */
   const uint32_t MI_BATCH_BUFFER_END = (0xA << 23);
   iris_batch_emit(batch, &MI_BATCH_BUFFER_END, sizeof(uint32_t));

   batch->no_wrap = false;
}

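/**
 * Fill out the execbuffer structure, point the kernel at our relocation
 * lists, and submit the batch (the actual ioctl is currently stubbed out
 * below). Afterwards, record any BO placement changes the kernel reports
 * back in the validation list.
 */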
static int
submit_batch(struct iris_batch *batch, int in_fence_fd, int *out_fence_fd)
{
   iris_bo_unmap(batch->cmdbuf.bo);
   iris_bo_unmap(batch->statebuf.bo);

   /* The requirements for using I915_EXEC_NO_RELOC are:
    *
    *   The addresses written in the objects must match the corresponding
    *   reloc.gtt_offset which in turn must match the corresponding
    *   execobject.offset.
    *
    *   Any render targets written to in the batch must be flagged with
    *   EXEC_OBJECT_WRITE.
    *
    *   To avoid stalling, execobject.offset should match the current
    *   address of that object within the active context.
    */
   /* Set statebuffer relocations */
   const unsigned state_index = batch->statebuf.bo->index;
   if (state_index < batch->exec_count &&
       batch->exec_bos[state_index] == batch->statebuf.bo) {
      struct drm_i915_gem_exec_object2 *entry =
         &batch->validation_list[state_index];
      assert(entry->handle == batch->statebuf.bo->gem_handle);
      entry->relocation_count = batch->statebuf.relocs.reloc_count;
      entry->relocs_ptr = (uintptr_t) batch->statebuf.relocs.relocs;
   }

   /* Set batchbuffer relocations */
   struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[0];
   assert(entry->handle == batch->cmdbuf.bo->gem_handle);
   entry->relocation_count = batch->cmdbuf.relocs.reloc_count;
   entry->relocs_ptr = (uintptr_t) batch->cmdbuf.relocs.relocs;

   struct drm_i915_gem_execbuffer2 execbuf = {
      .buffers_ptr = (uintptr_t) batch->validation_list,
      .buffer_count = batch->exec_count,
      .batch_start_offset = 0,
      .batch_len = buffer_bytes_used(&batch->cmdbuf),
      .flags = batch->ring |
               I915_EXEC_NO_RELOC |
               I915_EXEC_BATCH_FIRST |
               I915_EXEC_HANDLE_LUT,
      .rsvd1 = batch->hw_ctx_id, /* rsvd1 is actually the context ID */
   };

   unsigned long cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2;

   if (in_fence_fd != -1) {
      execbuf.rsvd2 = in_fence_fd;
      execbuf.flags |= I915_EXEC_FENCE_IN;
   }

   if (out_fence_fd != NULL) {
      cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2_WR;
      *out_fence_fd = -1;
      execbuf.flags |= I915_EXEC_FENCE_OUT;
   }

#if 0
   int ret = drm_ioctl(batch->screen->fd, cmd, &execbuf);
   if (ret != 0)
      ret = -errno;
#else
   int ret = 0;
   fprintf(stderr, "execbuf disabled for now\n");
#endif

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];

      bo->idle = false;
      bo->index = -1;

      /* Update iris_bo::gtt_offset */
      if (batch->validation_list[i].offset != bo->gtt_offset) {
         DBG("BO %d migrated: 0x%" PRIx64 " -> 0x%llx\n",
             bo->gem_handle, bo->gtt_offset,
             batch->validation_list[i].offset);
         bo->gtt_offset = batch->validation_list[i].offset;
      }
   }

   if (ret == 0 && out_fence_fd != NULL)
      *out_fence_fd = execbuf.rsvd2 >> 32;

   return ret;
}

/**
 * The in_fence_fd is ignored if -1. Otherwise this function takes ownership
 * of the fd.
 *
 * The out_fence_fd is ignored if NULL. Otherwise, the caller takes ownership
 * of the returned fd.
 */
int
_iris_batch_flush_fence(struct iris_batch *batch,
                        int in_fence_fd, int *out_fence_fd,
                        const char *file, int line)
{
   if (buffer_bytes_used(&batch->cmdbuf) == 0)
      return 0;

   /* Check that we didn't just wrap our batchbuffer at a bad time. */
   assert(!batch->no_wrap);

   iris_finish_batch(batch);

   if (unlikely(INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT))) {
      int bytes_for_commands = buffer_bytes_used(&batch->cmdbuf);
      int bytes_for_state = buffer_bytes_used(&batch->statebuf);
      fprintf(stderr, "%19s:%-3d: Batchbuffer flush with %5db (%0.1f%%) (pkt),"
              " %5db (%0.1f%%) (state), %4d BOs (%0.1fMb aperture),"
              " %4d batch relocs, %4d state relocs\n", file, line,
              bytes_for_commands, 100.0f * bytes_for_commands / BATCH_SZ,
              bytes_for_state, 100.0f * bytes_for_state / STATE_SZ,
              batch->exec_count,
              (float) batch->aperture_space / (1024 * 1024),
              batch->cmdbuf.relocs.reloc_count,
              batch->statebuf.relocs.reloc_count);
   }

   int ret = submit_batch(batch, in_fence_fd, out_fence_fd);
   if (ret < 0)
      return ret;

   //throttle(brw);

   //if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
      //do_batch_dump(brw);

   //if (brw->ctx.Const.ResetStrategy == GL_LOSE_CONTEXT_ON_RESET_ARB)
      //iris_check_for_reset(ice);

   if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
      dbg_printf("waiting for idle\n");
      iris_bo_wait_rendering(batch->cmdbuf.bo);
   }

   /* Clean up after the batch we submitted and prepare for a new one. */
   for (int i = 0; i < batch->exec_count; i++) {
      iris_bo_unreference(batch->exec_bos[i]);
      batch->exec_bos[i] = NULL;
   }
   batch->cmdbuf.relocs.reloc_count = 0;
   batch->statebuf.relocs.reloc_count = 0;
   batch->exec_count = 0;
   batch->aperture_space = 0;

   iris_bo_unreference(batch->statebuf.bo);

   /* Start a new batch buffer. */
   iris_batch_reset_and_clear_render_cache(batch);

   return 0;
}

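/**
 * Return true if the given BO is on this batch's validation list.
 */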
bool
iris_batch_references(struct iris_batch *batch, struct iris_bo *bo)
{
   unsigned index = READ_ONCE(bo->index);
   if (index < batch->exec_count && batch->exec_bos[index] == bo)
      return true;

   for (int i = 0; i < batch->exec_count; i++) {
      if (batch->exec_bos[i] == bo)
         return true;
   }
   return false;
}

/* This is the only way buffers get added to the validate list.
 */
static uint64_t
emit_reloc(struct iris_batch *batch,
           struct iris_reloc_list *rlist, uint32_t offset,
           struct iris_bo *target, uint32_t target_offset,
           unsigned int reloc_flags)
{
   assert(target != NULL);

   if (rlist->reloc_count == rlist->reloc_array_size) {
      rlist->reloc_array_size *= 2;
      rlist->relocs = realloc(rlist->relocs,
                              rlist->reloc_array_size *
                              sizeof(struct drm_i915_gem_relocation_entry));
   }

   unsigned int index = add_exec_bo(batch, target);
   struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[index];

   rlist->relocs[rlist->reloc_count++] =
      (struct drm_i915_gem_relocation_entry) {
         .offset = offset,
         .delta = target_offset,
         .target_handle = index,
         .presumed_offset = entry->offset,
      };

   /* Using the old buffer offset, write in what the right data would be, in
    * case the buffer doesn't move and we can short-circuit the relocation
    * processing in the kernel
    */
   return entry->offset + target_offset;
}

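/**
 * Add a relocation from the command buffer (at batch_offset) to "target",
 * returning the presumed address to write there.
 */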
uint64_t
iris_batch_reloc(struct iris_batch *batch, uint32_t batch_offset,
                 struct iris_bo *target, uint32_t target_offset,
                 unsigned int reloc_flags)
{
   assert(batch_offset <= batch->cmdbuf.bo->size - sizeof(uint32_t));

   return emit_reloc(batch, &batch->cmdbuf.relocs, batch_offset,
                     target, target_offset, reloc_flags);
}

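/**
 * Add a relocation from the state buffer (at state_offset) to "target",
 * returning the presumed address to write there.
 */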
uint64_t
iris_state_reloc(struct iris_batch *batch, uint32_t state_offset,
                 struct iris_bo *target, uint32_t target_offset,
                 unsigned int reloc_flags)
{
   assert(state_offset <= batch->statebuf.bo->size - sizeof(uint32_t));

   return emit_reloc(batch, &batch->statebuf.relocs, state_offset,
                     target, target_offset, reloc_flags);
}


static uint32_t
iris_state_entry_size(struct iris_batch *batch, uint32_t offset)
{
   struct hash_entry *entry =
      _mesa_hash_table_search(batch->state_sizes, (void *)(uintptr_t) offset);
   return entry ? (uintptr_t) entry->data : 0;
}

/**
 * Allocates a block of space in the statebuffer for indirect state.
 */
void *
iris_alloc_state(struct iris_batch *batch,
                 int size, int alignment,
                 uint32_t *out_offset)
{
   assert(size < batch->statebuf.bo->size);

   const unsigned existing_bytes = buffer_bytes_used(&batch->statebuf);
   unsigned aligned_size =
      ALIGN(existing_bytes, alignment) - existing_bytes + size;

   require_buffer_space(batch, &batch->statebuf, aligned_size,
                        STATE_SZ, MAX_STATE_SIZE);

   unsigned offset = ALIGN(buffer_bytes_used(&batch->statebuf), alignment);

   if (unlikely(batch->state_sizes)) {
      _mesa_hash_table_insert(batch->state_sizes,
                              (void *) (uintptr_t) offset,
                              (void *) (uintptr_t) size);
   }

   batch->statebuf.map_next += aligned_size;

   *out_offset = offset;
   return batch->statebuf.map + offset;
}

uint32_t
iris_emit_state(struct iris_batch *batch,
                const void *data,
                int size, int alignment)
{
   uint32_t out_offset;
   void *dest = iris_alloc_state(batch, size, alignment, &out_offset);
   memcpy(dest, data, size);
   return out_offset;
}
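
/* A minimal usage sketch (hypothetical caller, for illustration only):
 * upload a block of indirect state and record the offset where it landed,
 * so a later packet can point at it.
 *
 *    static const uint32_t blend_state[2] = { 0, 0 };
 *    uint32_t blend_offset =
 *       iris_emit_state(batch, blend_state, sizeof(blend_state), 64);
 *
 * The "blend_state" payload and its 64-byte alignment are illustrative,
 * not taken from real hardware packing.
 */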