batch_init(struct fd_batch *batch)
{
struct fd_context *ctx = batch->ctx;
+ enum fd_ringbuffer_flags flags = 0;
unsigned size = 0;
/* if kernel is too old to support unlimited # of cmd buffers, we
 * have no option but to allocate large worst-case sized cmdstream
 * buffers:
 */
if ((fd_device_version(ctx->screen->dev) < FD_VERSION_UNLIMITED_CMDS) ||
(fd_mesa_debug & FD_DBG_NOGROW)){
size = 0x100000;
+ } else {
+ flags = FD_RINGBUFFER_GROWABLE;
}
batch->submit = fd_submit_new(ctx->pipe);
if (batch->nondraw) {
batch->draw = fd_submit_new_ringbuffer(batch->submit, size,
- FD_RINGBUFFER_PRIMARY | FD_RINGBUFFER_GROWABLE);
+ FD_RINGBUFFER_PRIMARY | flags);
} else {
batch->gmem = fd_submit_new_ringbuffer(batch->submit, size,
- FD_RINGBUFFER_PRIMARY | FD_RINGBUFFER_GROWABLE);
+ FD_RINGBUFFER_PRIMARY | flags);
batch->draw = fd_submit_new_ringbuffer(batch->submit, size,
- FD_RINGBUFFER_GROWABLE);
+ flags);
if (ctx->screen->gpu_id < 600) {
batch->binning = fd_submit_new_ringbuffer(batch->submit,
- size, FD_RINGBUFFER_GROWABLE);
+ size, flags);
}
}