* The idea is to have batches as small as possible but large enough so that
* the queuing and mutex overhead is negligible.
*/
-#define TC_CALLS_PER_BATCH 192
+#define TC_CALLS_PER_BATCH 768
/* Threshold for when to use the queue or sync. */
#define TC_MAX_STRING_MARKER_BYTES 512
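
/*
 * Illustrative sketch, not part of this patch: how a threaded entry
 * point might use TC_MAX_STRING_MARKER_BYTES to choose between queuing
 * the call and synchronizing first.  threaded_context(), tc_sync() and
 * tc_queue_string_marker() are assumed/placeholder helpers named only
 * for illustration.
 */
static void
tc_emit_string_marker_sketch(struct pipe_context *_pipe,
                             const char *string, int len)
{
   struct threaded_context *tc = threaded_context(_pipe); /* assumed cast helper */

   if (len <= TC_MAX_STRING_MARKER_BYTES) {
      /* Small enough: copy the string into the current batch and let
       * the driver thread replay it later. */
      tc_queue_string_marker(tc, string, len); /* placeholder */
   } else {
      /* Too large for a batch slot: drain the queue and forward the
       * call directly to the wrapped driver context. */
      tc_sync(tc); /* assumed helper */
      tc->pipe->emit_string_marker(tc->pipe, string, len);
   }
}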
unsigned num_direct_slots;
unsigned num_syncs;
+ /* Estimate of how many VRAM/GTT bytes are mmap'd in
+ * the current tc_batch.
+ */
+ uint64_t bytes_mapped_estimate;
+ uint64_t bytes_mapped_limit;
+
struct util_queue queue;
struct util_queue_fence *fence;
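
/*
 * Illustrative sketch, not part of this patch: one way the new
 * bytes_mapped_estimate/bytes_mapped_limit fields might be used to
 * bound how much mapped memory queued transfers can accumulate.
 * The helper name and the use of tc->base/PIPE_FLUSH_ASYNC are
 * assumptions for illustration, not actual driver code.
 */
static void
tc_account_mapping_sketch(struct threaded_context *tc, uint64_t size)
{
   /* Accumulate an estimate of bytes mapped by the current batch. */
   tc->bytes_mapped_estimate += size;

   /* Once the estimate exceeds the limit, kick off an asynchronous
    * flush so the driver thread consumes the batch and the mappings
    * can be released instead of piling up.  The estimate would
    * presumably be reset when the batch is flushed. */
   if (tc->bytes_mapped_estimate > tc->bytes_mapped_limit)
      tc->base.flush(&tc->base, NULL, PIPE_FLUSH_ASYNC);
}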
return (struct threaded_transfer*)transfer;
}
-static inline struct pipe_context *
-threaded_context_unwrap_unsync(struct pipe_context *pipe)
-{
- if (!pipe || !pipe->priv)
- return pipe;
- return (struct pipe_context*)pipe->priv;
-}
-
static inline void
tc_unflushed_batch_token_reference(struct tc_unflushed_batch_token **dst,
struct tc_unflushed_batch_token *src)