tc_assert(next->num_total_call_slots != 0);
tc_batch_check(next);
tc_debug_check(tc);
+ tc->bytes_mapped_estimate = 0;
p_atomic_add(&tc->num_offloaded_slots, next->num_total_call_slots);
if (next->token) {
/* .. and execute unflushed calls directly. */
if (next->num_total_call_slots) {
p_atomic_add(&tc->num_direct_slots, next->num_total_call_slots);
+ tc->bytes_mapped_estimate = 0;
tc_batch_execute(next, 0);
synced = true;
}
usage & PIPE_TRANSFER_DISCARD_RANGE ? " discard_range" :
usage & PIPE_TRANSFER_READ ? " read" : " ??");
+ tc->bytes_mapped_estimate += box->width;
+
return pipe->transfer_map(pipe, tres->latest ? tres->latest : resource,
level, usage, box, transfer);
}
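
The new limit is opt-in: bytes_mapped_limit stays zero (disabled) unless the driver sets it on the threaded context it creates. A minimal sketch of that driver-side setup, assuming tc_pipe is the pipe_context returned by threaded_context_create(); the 256 MiB cap is an arbitrary example value, not part of this patch:

/* Hypothetical driver code: threaded_context_create() returns the
 * original pipe unchanged when threading is disabled, so only set the
 * limit when a threaded context was actually created. */
if (tc_pipe != pipe) {
   struct threaded_context *tc = threaded_context(tc_pipe);
   tc->bytes_mapped_limit = 256 * 1024 * 1024;   /* arbitrary example */
}
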
pipe->transfer_unmap(pipe, payload->transfer);
}
+static void
+tc_flush(struct pipe_context *_pipe, struct pipe_fence_handle **fence,
+ unsigned flags);
+
static void
tc_transfer_unmap(struct pipe_context *_pipe, struct pipe_transfer *transfer)
{
}
tc_add_small_call(tc, TC_CALL_transfer_unmap)->transfer = transfer;
+
+ /* tc_transfer_map maps buffers directly, but tc_transfer_unmap
+ * defers the unmap operation to batch execution.
+ * bytes_mapped_estimate is an estimate of the map/unmap bytes delta;
+ * if it exceeds an optional limit, the current batch is flushed to
+ * reclaim some RAM (see the standalone sketch after this hunk). */
+ if (!ttrans->staging && tc->bytes_mapped_limit &&
+ tc->bytes_mapped_estimate > tc->bytes_mapped_limit) {
+ tc_flush(_pipe, NULL, PIPE_FLUSH_ASYNC);
+ }
}
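
To make the interaction of the pieces concrete, here is a standalone sketch of the accounting in plain C, with no Gallium types: the field names mirror the patch, but the toy_* functions and the 64 MiB limit are assumptions for illustration only.

#include <stdint.h>
#include <stdio.h>

/* Toy model: maps grow an estimate, flushing/executing the batch
 * resets it, and unmap triggers a flush once the optional limit is
 * exceeded. */
struct toy_tc {
   uint64_t bytes_mapped_estimate;
   uint64_t bytes_mapped_limit;   /* 0 means "no limit" */
};

static void toy_flush(struct toy_tc *tc)
{
   /* The patch zeroes the estimate when the batch is offloaded to the
    * worker thread or executed directly, since that is when the
    * deferred unmaps actually run. */
   printf("flush: ~%llu mapped bytes reclaimed\n",
          (unsigned long long)tc->bytes_mapped_estimate);
   tc->bytes_mapped_estimate = 0;
}

static void toy_transfer_map(struct toy_tc *tc, uint64_t width)
{
   tc->bytes_mapped_estimate += width;          /* as in tc_transfer_map */
}

static void toy_transfer_unmap(struct toy_tc *tc)
{
   if (tc->bytes_mapped_limit &&                /* as in tc_transfer_unmap */
       tc->bytes_mapped_estimate > tc->bytes_mapped_limit)
      toy_flush(tc);
}

int main(void)
{
   struct toy_tc tc = { .bytes_mapped_limit = 64 * 1024 * 1024 };

   for (int i = 0; i < 100; i++) {
      toy_transfer_map(&tc, 1024 * 1024);       /* 1 MiB per mapping */
      toy_transfer_unmap(&tc);
   }
   return 0;
}
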
struct tc_buffer_subdata {