require_buffer_space(batch, &batch->cmdbuf, size, BATCH_SZ, MAX_BATCH_SIZE);
}
+/**
+ * Reserve @bytes of space at the current tail of the batch's command
+ * buffer and return a pointer to it, advancing map_next past the
+ * reservation.  The caller writes packed commands directly into the
+ * returned pointer.
+ *
+ * Space is guaranteed by iris_require_command_space(), which may grow
+ * or flush the underlying buffer first.
+ */
+void *
+iris_get_command_space(struct iris_batch *batch, unsigned bytes)
+{
+ iris_require_command_space(batch, bytes);
+ void *map = batch->cmdbuf.map_next;
+ batch->cmdbuf.map_next += bytes;
+ return map;
+}
+
void
iris_batch_emit(struct iris_batch *batch, const void *data, unsigned size)
{
- iris_require_command_space(batch, size);
- memcpy(batch->cmdbuf.map_next, data, size);
- batch->cmdbuf.map_next += size;
+ /* Reserve space via the shared helper, then copy the pre-packed
+  * command data into it.  The helper advances map_next for us.
+  */
+ void *map = iris_get_command_space(batch, size);
+ memcpy(map, data, size);
}
/**
uint8_t ring);
void iris_batch_free(struct iris_batch *batch);
void iris_require_command_space(struct iris_batch *batch, unsigned size);
+void *iris_get_command_space(struct iris_batch *batch, unsigned bytes);
void iris_batch_emit(struct iris_batch *batch, const void *data, unsigned size);
int _iris_batch_flush_fence(struct iris_batch *batch,
#define __genxml_cmd_header(cmd) cmd ## _header
#define __genxml_cmd_pack(cmd) cmd ## _pack
-static void *
-get_command_space(struct iris_batch *batch, unsigned bytes)
-{
- iris_require_command_space(batch, bytes);
- void *map = batch->cmdbuf.map_next;
- batch->cmdbuf.map_next += bytes;
- return map;
-}
-
#define _iris_pack_command(batch, cmd, dst, name) \
for (struct cmd name = { __genxml_cmd_header(cmd) }, \
*_dst = (void *)(dst); __builtin_expect(_dst != NULL, 1); \
_dst = NULL)
#define iris_emit_cmd(batch, cmd, name) \
- _iris_pack_command(batch, cmd, get_command_space(batch, 4 * __genxml_cmd_length(cmd)), name)
+ _iris_pack_command(batch, cmd, iris_get_command_space(batch, 4 * __genxml_cmd_length(cmd)), name)
#define iris_emit_merge(batch, dwords0, dwords1, num_dwords) \
do { \
- uint32_t *dw = get_command_space(batch, 4 * num_dwords); \
+ uint32_t *dw = iris_get_command_space(batch, 4 * num_dwords); \
for (uint32_t i = 0; i < num_dwords; i++) \
dw[i] = (dwords0)[i] | (dwords1)[i]; \
VG(VALGRIND_CHECK_MEM_IS_DEFINED(dw, num_dwords)); \