/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL VMWARE AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include "iris_batch.h"
#include "iris_bufmgr.h"
#include "iris_context.h"

#include "common/gen_decoder.h"

#include "drm-uapi/i915_drm.h"

#include "util/hash_table.h"
#include "main/macros.h"

#define FILE_DEBUG_FLAG DEBUG_BUFMGR
/**
 * Target sizes of the batch and state buffers.  We create the initial
 * buffers at these sizes, and flush when they're nearly full.  If we
 * underestimate how close we are to the end, and suddenly need more space
 * in the middle of a draw, we can grow the buffers, and finish the draw.
 * At that point, we'll be over our target size, so the next operation
 * should flush.  Each time we flush the batch, we recreate both buffers
 * at the original target size, so it doesn't grow without bound.
 */
#define BATCH_SZ (20 * 1024)
#define STATE_SZ (18 * 1024)
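
/* Growth (see grow_buffer() below) is capped by MAX_BATCH_SIZE and
 * MAX_STATE_SIZE, which are defined elsewhere in the driver and used as the
 * hard limits passed to require_buffer_space().
 */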
static void decode_batch(struct iris_batch *batch);

static void iris_batch_reset(struct iris_batch *batch);

static void
dump_validation_list(struct iris_batch *batch)
{
   fprintf(stderr, "Validation list (length %d):\n", batch->exec_count);

   for (int i = 0; i < batch->exec_count; i++) {
      assert(batch->validation_list[i].handle ==
             batch->exec_bos[i]->gem_handle);
      fprintf(stderr, "[%d] = %d %s %p\n", i,
              batch->validation_list[i].handle,
              batch->exec_bos[i]->name,
              batch->exec_bos[i]);
   }
}
static bool
uint_key_compare(const void *a, const void *b)
{
   return a == b;
}

static uint32_t
uint_key_hash(const void *key)
{
   return (uintptr_t) key;
}
static void
init_reloc_list(struct iris_reloc_list *rlist, int count)
{
   rlist->reloc_count = 0;
   rlist->reloc_array_size = count;
   rlist->relocs = malloc(rlist->reloc_array_size *
                          sizeof(struct drm_i915_gem_relocation_entry));
}
static void
create_batch_buffer(struct iris_bufmgr *bufmgr,
                    struct iris_batch_buffer *buf,
                    const char *name, unsigned size)
{
   buf->bo = iris_bo_alloc(bufmgr, name, size, 4096);
   buf->bo->kflags |= EXEC_OBJECT_CAPTURE;
   buf->map = iris_bo_map(NULL, buf->bo, MAP_READ | MAP_WRITE);
   buf->map_next = buf->map;
}
void
iris_init_batch(struct iris_batch *batch,
                struct iris_screen *screen,
                struct pipe_debug_callback *dbg,
                uint8_t ring)
{
   batch->screen = screen;
   batch->dbg = dbg;

   /* ring should be one of I915_EXEC_RENDER, I915_EXEC_BLT, etc. */
   assert((ring & ~I915_EXEC_RING_MASK) == 0);
   assert(util_bitcount(ring) == 1);
   batch->ring = ring;

   init_reloc_list(&batch->cmdbuf.relocs, 256);
   init_reloc_list(&batch->statebuf.relocs, 256);

   batch->exec_count = 0;
   batch->exec_array_size = 100;
   batch->exec_bos =
      malloc(batch->exec_array_size * sizeof(batch->exec_bos[0]));
   batch->validation_list =
      malloc(batch->exec_array_size * sizeof(batch->validation_list[0]));

   if (unlikely(INTEL_DEBUG)) {
      batch->state_sizes =
         _mesa_hash_table_create(NULL, uint_key_hash, uint_key_compare);
   }

   iris_batch_reset(batch);
}
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
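
/* Add a BO to the batch's validation (execbuf) list, taking a reference if
 * it isn't already there, and return its index in that list.  The unlocked
 * READ_ONCE of bo->index is only a hint; it is verified against exec_bos[]
 * before being trusted, and we fall back to a linear search otherwise.
 */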
static unsigned
add_exec_bo(struct iris_batch *batch, struct iris_bo *bo)
{
   unsigned index = READ_ONCE(bo->index);

   if (index < batch->exec_count && batch->exec_bos[index] == bo)
      return index;

   /* May have been shared between multiple active batches */
   for (index = 0; index < batch->exec_count; index++) {
      if (batch->exec_bos[index] == bo)
         return index;
   }

   iris_bo_reference(bo);

   if (batch->exec_count == batch->exec_array_size) {
      batch->exec_array_size *= 2;
      batch->exec_bos =
         realloc(batch->exec_bos,
                 batch->exec_array_size * sizeof(batch->exec_bos[0]));
      batch->validation_list =
         realloc(batch->validation_list,
                 batch->exec_array_size * sizeof(batch->validation_list[0]));
   }

   batch->validation_list[batch->exec_count] =
      (struct drm_i915_gem_exec_object2) {
         .handle = bo->gem_handle,
         .alignment = bo->align,
         .offset = bo->gtt_offset,
         .flags = bo->kflags,
      };

   bo->index = batch->exec_count;
   batch->exec_bos[batch->exec_count] = bo;
   batch->aperture_space += bo->size;

   return batch->exec_count++;
}
static void
iris_batch_reset(struct iris_batch *batch)
{
   struct iris_screen *screen = batch->screen;
   struct iris_bufmgr *bufmgr = screen->bufmgr;

   if (batch->last_cmd_bo != NULL) {
      iris_bo_unreference(batch->last_cmd_bo);
      batch->last_cmd_bo = NULL;
   }
   batch->last_cmd_bo = batch->cmdbuf.bo;

   create_batch_buffer(bufmgr, &batch->cmdbuf, "command buffer", BATCH_SZ);
   create_batch_buffer(bufmgr, &batch->statebuf, "state buffer", STATE_SZ);

   /* Avoid making 0 a valid state offset - otherwise the decoder will try
    * and decode data when we use offset 0 as a null pointer.
    */
   batch->statebuf.map_next += 1;

   add_exec_bo(batch, batch->cmdbuf.bo);
   assert(batch->cmdbuf.bo->index == 0);

   if (batch->state_sizes)
      _mesa_hash_table_clear(batch->state_sizes, NULL);
}

void
iris_batch_reset_and_clear_render_cache(struct iris_batch *batch)
{
   iris_batch_reset(batch);
   // XXX: iris_render_cache_set_clear(batch);
}
static void
free_batch_buffer(struct iris_batch_buffer *buf)
{
   iris_bo_unreference(buf->bo);
   buf->bo = NULL;
   buf->map = NULL;
   buf->map_next = NULL;

   free(buf->relocs.relocs);
   buf->relocs.relocs = NULL;
   buf->relocs.reloc_array_size = 0;
}

void
iris_batch_free(struct iris_batch *batch)
{
   for (int i = 0; i < batch->exec_count; i++) {
      iris_bo_unreference(batch->exec_bos[i]);
   }
   free(batch->exec_bos);
   free(batch->validation_list);

   free_batch_buffer(&batch->cmdbuf);
   free_batch_buffer(&batch->statebuf);

   iris_bo_unreference(batch->last_cmd_bo);

   if (batch->state_sizes)
      _mesa_hash_table_destroy(batch->state_sizes, NULL);
}
/**
 * Finish copying the old batch/state buffer's contents to the new one
 * after we tried to "grow" the buffer in an earlier operation.
 */
static void
finish_growing_bos(struct iris_batch_buffer *buf)
{
   struct iris_bo *old_bo = buf->partial_bo;
   if (!old_bo)
      return;

   void *old_map = old_bo->map_cpu ? old_bo->map_cpu : old_bo->map_wc;
   memcpy(buf->map, old_map, buf->partial_bytes);

   buf->partial_bo = NULL;
   buf->partial_bytes = 0;

   iris_bo_unreference(old_bo);
}

static unsigned
buffer_bytes_used(struct iris_batch_buffer *buf)
{
   return buf->map_next - buf->map;
}
/**
 * Grow either the batch or state buffer to a new larger size.
 *
 * We can't actually grow buffers, so we allocate a new one, copy over
 * the existing contents, and update our lists to refer to the new one.
 *
 * Note that this is only temporary - each new batch recreates the buffers
 * at their original target size (BATCH_SZ or STATE_SZ).
 */
static void
grow_buffer(struct iris_batch *batch,
            struct iris_batch_buffer *buf,
            unsigned new_size)
{
   struct iris_bufmgr *bufmgr = batch->screen->bufmgr;
   struct iris_bo *bo = buf->bo;

   perf_debug(batch->dbg, "Growing %s - ran out of space\n", bo->name);

   if (buf->partial_bo) {
      /* We've already grown once, and now we need to do it again.
       * Finish our last grow operation so we can start a new one.
       * This should basically never happen.
       */
      perf_debug(batch->dbg, "Had to grow multiple times");
      finish_growing_bos(buf);
   }

   const unsigned existing_bytes = buffer_bytes_used(buf);

   struct iris_bo *new_bo =
      iris_bo_alloc(bufmgr, bo->name, new_size, bo->align);

   buf->map = iris_bo_map(NULL, new_bo, MAP_READ | MAP_WRITE);
   buf->map_next = buf->map + existing_bytes;

   /* Try to put the new BO at the same GTT offset as the old BO (which
    * we're throwing away, so it doesn't need to be there).
    *
    * This guarantees that our relocations continue to work: values we've
    * already written into the buffer, values we're going to write into the
    * buffer, and the validation/relocation lists all will match.
    *
    * Also preserve kflags for EXEC_OBJECT_CAPTURE.
    */
   new_bo->gtt_offset = bo->gtt_offset;
   new_bo->index = bo->index;
   new_bo->kflags = bo->kflags;

   /* Batch/state buffers are per-context, and if we've run out of space,
    * we must have actually used them before, so...they will be in the list.
    */
   assert(bo->index < batch->exec_count);
   assert(batch->exec_bos[bo->index] == bo);

   /* Update the validation list to use the new BO. */
   batch->exec_bos[bo->index] = new_bo;
   batch->validation_list[bo->index].handle = new_bo->gem_handle;

   /* Exchange the two BOs...without breaking pointers to the old BO.
    *
    * Consider this scenario:
    *
    * 1. Somebody calls iris_alloc_state() to get a region of memory, and
    *    then creates an iris_address pointing to batch->statebuf.bo.
    * 2. They then call iris_alloc_state() a second time, which happens to
    *    grow and replace the state buffer.  They then try to emit a
    *    relocation to their first section of memory.
    *
    * If we replace the batch->statebuf.bo pointer at step 2, we would
    * break the address created in step 1.  They'd have a pointer to the
    * old destroyed BO.  Emitting a relocation would add this dead BO to
    * the validation list...causing /both/ statebuffers to be in the list,
    * and all kinds of disasters.
    *
    * This is not a contrived case - BLORP vertex data upload hits this.
    *
    * There are worse scenarios too.  Fences for GL sync objects reference
    * batch->cmdbuf.bo.  If we replaced the batch pointer when growing,
    * we'd need to chase down every fence and update it to point to the
    * new BO.  Otherwise, it would refer to a "batch" that never actually
    * gets submitted, and would fail to trigger.
    *
    * To work around both of these issues, we transmutate the buffers in
    * place, making the existing struct iris_bo represent the new buffer,
    * and "new_bo" represent the old BO.  This is highly unusual, but it
    * seems like a necessary evil.
    *
    * We also defer the memcpy of the existing batch's contents.  Callers
    * may make multiple iris_alloc_state() calls, and retain pointers to the
    * old BO's map.  We'll perform the memcpy in finish_growing_bos() when
    * we finally submit the batch, at which point we've finished uploading
    * state, and nobody should have any old references anymore.
    *
    * To do that, we keep a reference to the old BO in buf->partial_bo,
    * and store the number of bytes to copy in buf->partial_bytes.  We
    * can monkey with the refcounts directly without atomics because these
    * are per-context BOs and they can only be touched by this thread.
    */
   assert(new_bo->refcount == 1);
   new_bo->refcount = bo->refcount;
   bo->refcount = 1;

   struct iris_bo tmp;
   memcpy(&tmp, bo, sizeof(struct iris_bo));
   memcpy(bo, new_bo, sizeof(struct iris_bo));
   memcpy(new_bo, &tmp, sizeof(struct iris_bo));

   buf->partial_bo = new_bo; /* the one reference of the OLD bo */
   buf->partial_bytes = existing_bytes;
}
static void
require_buffer_space(struct iris_batch *batch,
                     struct iris_batch_buffer *buf,
                     unsigned size,
                     unsigned flush_threshold,
                     unsigned max_buffer_size)
{
   const unsigned required_bytes = buffer_bytes_used(buf) + size;

   if (!batch->no_wrap && required_bytes >= flush_threshold) {
      iris_batch_flush(batch);
   } else if (required_bytes >= buf->bo->size) {
      grow_buffer(batch, buf,
                  MIN2(buf->bo->size + buf->bo->size / 2, max_buffer_size));
      assert(required_bytes < buf->bo->size);
   }
}
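
/* A sketch of how this plays out with the numbers above: the command buffer
 * is created at BATCH_SZ (20kB).  If a request overflows it while flushing
 * isn't allowed (batch->no_wrap is set, mid-draw), it grows by 1.5x - to
 * roughly 30kB, then 45kB, and so on - clamped to max_buffer_size.  The next
 * request after that sees required_bytes above the flush threshold and
 * flushes, recreating the buffer at its target size.
 */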
void
iris_require_command_space(struct iris_batch *batch, unsigned size)
{
   require_buffer_space(batch, &batch->cmdbuf, size, BATCH_SZ, MAX_BATCH_SIZE);
}
/**
 * Reserve some space in the statebuffer, or flush.
 *
 * This is used to estimate when we're near the end of the batch,
 * so we can flush early.
 */
void
iris_require_state_space(struct iris_batch *batch, unsigned size)
{
   require_buffer_space(batch, &batch->statebuf, size, STATE_SZ,
                        MAX_STATE_SIZE);
}

void
iris_batch_emit(struct iris_batch *batch, const void *data, unsigned size)
{
   iris_require_command_space(batch, size);
   memcpy(batch->cmdbuf.map_next, data, size);
   batch->cmdbuf.map_next += size;
}
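
/* Illustrative only (not a call site in this file): emitting a raw dword,
 * say an MI_NOOP, would look like
 *
 *    const uint32_t MI_NOOP = 0;
 *    iris_batch_emit(batch, &MI_NOOP, sizeof(MI_NOOP));
 *
 * iris_require_command_space() guarantees map_next has room before the copy.
 */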
/**
 * Called from iris_batch_flush before emitting MI_BATCH_BUFFER_END and
 * sending it off.
 *
 * This function can emit state (say, to preserve registers that aren't saved
 * between batches).
 */
static void
iris_finish_batch(struct iris_batch *batch)
{
   batch->no_wrap = true;

   /* Mark the end of the buffer. */
   const uint32_t MI_BATCH_BUFFER_END = (0xA << 23);
   iris_batch_emit(batch, &MI_BATCH_BUFFER_END, sizeof(uint32_t));

   batch->no_wrap = false;
}
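
/* MI_BATCH_BUFFER_END tells the command streamer to stop parsing the batch;
 * anything past it is ignored.  no_wrap is held across the emit so that
 * appending the terminator can never trigger a recursive flush - at worst it
 * grows the buffer.
 */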
static int
submit_batch(struct iris_batch *batch, int in_fence_fd, int *out_fence_fd)
{
   iris_bo_unmap(batch->cmdbuf.bo);
   iris_bo_unmap(batch->statebuf.bo);

   /* The requirement for using I915_EXEC_NO_RELOC are:
    *
    *   The addresses written in the objects must match the corresponding
    *   reloc.gtt_offset which in turn must match the corresponding
    *   execobject.offset.
    *
    *   Any render targets written to in the batch must be flagged with
    *   EXEC_OBJECT_WRITE.
    *
    *   To avoid stalling, execobject.offset should match the current
    *   address of that object within the active context.
    */

   /* Set statebuffer relocations */
   const unsigned state_index = batch->statebuf.bo->index;
   if (state_index < batch->exec_count &&
       batch->exec_bos[state_index] == batch->statebuf.bo) {
      struct drm_i915_gem_exec_object2 *entry =
         &batch->validation_list[state_index];
      assert(entry->handle == batch->statebuf.bo->gem_handle);
      entry->relocation_count = batch->statebuf.relocs.reloc_count;
      entry->relocs_ptr = (uintptr_t) batch->statebuf.relocs.relocs;
   }

   /* Set batchbuffer relocations */
   struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[0];
   assert(entry->handle == batch->cmdbuf.bo->gem_handle);
   entry->relocation_count = batch->cmdbuf.relocs.reloc_count;
   entry->relocs_ptr = (uintptr_t) batch->cmdbuf.relocs.relocs;

   struct drm_i915_gem_execbuffer2 execbuf = {
      .buffers_ptr = (uintptr_t) batch->validation_list,
      .buffer_count = batch->exec_count,
      .batch_start_offset = 0,
      .batch_len = buffer_bytes_used(&batch->cmdbuf),
      .flags = batch->ring |
               I915_EXEC_NO_RELOC |
               I915_EXEC_BATCH_FIRST |
               I915_EXEC_HANDLE_LUT,
      .rsvd1 = batch->hw_ctx_id, /* rsvd1 is actually the context ID */
   };

   unsigned long cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2;

   if (in_fence_fd != -1) {
      execbuf.rsvd2 = in_fence_fd;
      execbuf.flags |= I915_EXEC_FENCE_IN;
   }

   if (out_fence_fd != NULL) {
      cmd = DRM_IOCTL_I915_GEM_EXECBUFFER2_WR;
      execbuf.flags |= I915_EXEC_FENCE_OUT;
   }

   int ret = drm_ioctl(batch->screen->fd, cmd, &execbuf);
   fprintf(stderr, "execbuf disabled for now\n");

   for (int i = 0; i < batch->exec_count; i++) {
      struct iris_bo *bo = batch->exec_bos[i];

      /* Update iris_bo::gtt_offset */
      if (batch->validation_list[i].offset != bo->gtt_offset) {
         DBG("BO %d migrated: 0x%" PRIx64 " -> 0x%llx\n",
             bo->gem_handle, bo->gtt_offset,
             batch->validation_list[i].offset);
         bo->gtt_offset = batch->validation_list[i].offset;
      }
   }

   if (ret == 0 && out_fence_fd != NULL)
      *out_fence_fd = execbuf.rsvd2 >> 32;

   return ret;
}
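
/* A note on the fence plumbing above, per the i915 uapi: with
 * I915_EXEC_FENCE_IN the kernel treats the low 32 bits of rsvd2 as an input
 * sync_file fd to wait on, and with I915_EXEC_FENCE_OUT it returns a new
 * sync_file fd in the high 32 bits - hence the ">> 32" when copying it to
 * *out_fence_fd.  The _WR variant of the execbuffer ioctl is required so the
 * kernel is allowed to write that value back.
 */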
/**
 * The in_fence_fd is ignored if -1.  Otherwise this function takes ownership
 * of the fd.
 *
 * The out_fence_fd is ignored if NULL.  Otherwise, the caller takes ownership
 * of the returned fd.
 */
int
_iris_batch_flush_fence(struct iris_batch *batch,
                        int in_fence_fd, int *out_fence_fd,
                        const char *file, int line)
{
   if (buffer_bytes_used(&batch->cmdbuf) == 0)
      return 0;

   /* Check that we didn't just wrap our batchbuffer at a bad time. */
   assert(!batch->no_wrap);

   iris_finish_batch(batch);

   if (unlikely(INTEL_DEBUG & (DEBUG_BATCH | DEBUG_SUBMIT))) {
      int bytes_for_commands = buffer_bytes_used(&batch->cmdbuf);
      int bytes_for_state = buffer_bytes_used(&batch->statebuf);
      fprintf(stderr, "%19s:%-3d: Batchbuffer flush with %5db (%0.1f%%) (pkt),"
              " %5db (%0.1f%%) (state), %4d BOs (%0.1fMb aperture),"
              " %4d batch relocs, %4d state relocs\n", file, line,
              bytes_for_commands, 100.0f * bytes_for_commands / BATCH_SZ,
              bytes_for_state, 100.0f * bytes_for_state / STATE_SZ,
              batch->exec_count,
              (float) batch->aperture_space / (1024 * 1024),
              batch->cmdbuf.relocs.reloc_count,
              batch->statebuf.relocs.reloc_count);
   }

   int ret = submit_batch(batch, in_fence_fd, out_fence_fd);

   if (unlikely(INTEL_DEBUG & DEBUG_BATCH))
      decode_batch(batch);

   //if (iris->ctx.Const.ResetStrategy == GL_LOSE_CONTEXT_ON_RESET_ARB)
      //iris_check_for_reset(ice);

   if (unlikely(INTEL_DEBUG & DEBUG_SYNC)) {
      dbg_printf("waiting for idle\n");
      iris_bo_wait_rendering(batch->cmdbuf.bo);
   }

   /* Clean up after the batch we submitted and prepare for a new one. */
   for (int i = 0; i < batch->exec_count; i++) {
      iris_bo_unreference(batch->exec_bos[i]);
      batch->exec_bos[i] = NULL;
   }
   batch->cmdbuf.relocs.reloc_count = 0;
   batch->statebuf.relocs.reloc_count = 0;
   batch->exec_count = 0;
   batch->aperture_space = 0;

   iris_bo_unreference(batch->statebuf.bo);

   /* Start a new batch buffer. */
   iris_batch_reset_and_clear_render_cache(batch);

   return ret;
}
bool
iris_batch_references(struct iris_batch *batch, struct iris_bo *bo)
{
   unsigned index = READ_ONCE(bo->index);
   if (index < batch->exec_count && batch->exec_bos[index] == bo)
      return true;

   for (int i = 0; i < batch->exec_count; i++) {
      if (batch->exec_bos[i] == bo)
         return true;
   }
   return false;
}
/* This is the only way buffers get added to the validate list.
 */
static uint64_t
emit_reloc(struct iris_batch *batch,
           struct iris_reloc_list *rlist, uint32_t offset,
           struct iris_bo *target, uint32_t target_offset,
           unsigned int reloc_flags)
{
   assert(target != NULL);

   if (rlist->reloc_count == rlist->reloc_array_size) {
      rlist->reloc_array_size *= 2;
      rlist->relocs = realloc(rlist->relocs,
                              rlist->reloc_array_size *
                              sizeof(struct drm_i915_gem_relocation_entry));
   }

   unsigned int index = add_exec_bo(batch, target);
   struct drm_i915_gem_exec_object2 *entry = &batch->validation_list[index];

   rlist->relocs[rlist->reloc_count++] =
      (struct drm_i915_gem_relocation_entry) {
         .offset = offset,
         .delta = target_offset,
         .target_handle = index,
         .presumed_offset = entry->offset,
      };

   /* Using the old buffer offset, write in what the right data would be, in
    * case the buffer doesn't move and we can short-circuit the relocation
    * processing in the kernel
    */
   return entry->offset + target_offset;
}
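
/* Because the execbuffer is submitted with I915_EXEC_HANDLE_LUT, the
 * relocation's target_handle above is an index into the validation list
 * rather than a GEM handle.  Callers write the returned presumed address
 * into the command or state buffer; if the kernel ends up moving the target
 * BO, it patches that location using this relocation entry.
 */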
uint64_t
iris_batch_reloc(struct iris_batch *batch, uint32_t batch_offset,
                 struct iris_bo *target, uint32_t target_offset,
                 unsigned int reloc_flags)
{
   assert(batch_offset <= batch->cmdbuf.bo->size - sizeof(uint32_t));

   return emit_reloc(batch, &batch->cmdbuf.relocs, batch_offset,
                     target, target_offset, reloc_flags);
}

uint64_t
iris_state_reloc(struct iris_batch *batch, uint32_t state_offset,
                 struct iris_bo *target, uint32_t target_offset,
                 unsigned int reloc_flags)
{
   assert(state_offset <= batch->statebuf.bo->size - sizeof(uint32_t));

   return emit_reloc(batch, &batch->statebuf.relocs, state_offset,
                     target, target_offset, reloc_flags);
}
uint32_t
iris_state_entry_size(struct iris_batch *batch, uint32_t offset)
{
   struct hash_entry *entry =
      _mesa_hash_table_search(batch->state_sizes, (void *)(uintptr_t) offset);
   return entry ? (uintptr_t) entry->data : 0;
}
/**
 * Allocates a block of space in the statebuffer for indirect state.
 */
void *
iris_alloc_state(struct iris_batch *batch,
                 int size, int alignment,
                 uint32_t *out_offset)
{
   assert(size < batch->statebuf.bo->size);

   const unsigned existing_bytes = buffer_bytes_used(&batch->statebuf);
   unsigned aligned_size =
      ALIGN(existing_bytes, alignment) - existing_bytes + size;

   require_buffer_space(batch, &batch->statebuf, aligned_size,
                        STATE_SZ, MAX_STATE_SIZE);

   unsigned offset = ALIGN(buffer_bytes_used(&batch->statebuf), alignment);

   if (unlikely(batch->state_sizes)) {
      _mesa_hash_table_insert(batch->state_sizes,
                              (void *) (uintptr_t) offset,
                              (void *) (uintptr_t) size);
   }

   batch->statebuf.map_next += aligned_size;

   *out_offset = offset;

   /* Return the CPU map of the newly allocated region (map_next now points
    * just past it), so the caller can fill in its state data.
    */
   return batch->statebuf.map_next - size;
}
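
/* Illustrative only (hypothetical caller): allocating a chunk of indirect
 * state and recording where it landed might look like
 *
 *    uint32_t offset;
 *    uint32_t *map = iris_alloc_state(batch, 4 * sizeof(uint32_t), 64, &offset);
 *    map[0] = ...;
 *
 * A packet that needs to point at it would then emit a relocation against
 * batch->statebuf.bo with target_offset = offset, e.g. via iris_batch_reloc().
 */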
uint32_t
iris_emit_state(struct iris_batch *batch,
                const void *data,
                int size, int alignment)
{
   uint32_t out_offset;
   void *dest = iris_alloc_state(batch, size, alignment, &out_offset);

   memcpy(dest, data, size);

   return out_offset;
}

static void
decode_batch(struct iris_batch *batch)
{
   // XXX: decode the batch
}