/**************************************************************************
 *
 * Copyright 2007-2008 VMware, Inc.
 * Copyright 2015 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "pb_cache.h"
#include "util/u_memory.h"
#include "util/u_time.h"

/**
 * Actually destroy the buffer.
 */
static void
destroy_buffer_locked(struct pb_cache_entry *entry)
{
   struct pb_cache *mgr = entry->mgr;

   assert(!pipe_is_referenced(&entry->buffer->reference));
   if (entry->head.next) {
      LIST_DEL(&entry->head);
      assert(mgr->num_buffers);
      --mgr->num_buffers;
      mgr->cache_size -= entry->buffer->size;
   }
   entry->mgr->destroy_buffer(entry->buffer);
}

/**
 * Free as many cache buffers from the list head as possible.
 */
static void
release_expired_buffers_locked(struct list_head *cache)
{
   struct list_head *curr, *next;
   struct pb_cache_entry *entry;
   int64_t now;

   now = os_time_get();

   curr = cache->next;
   next = curr->next;
   while (curr != cache) {
      entry = LIST_ENTRY(struct pb_cache_entry, curr, head);

      /* Entries are appended in submission order, so stop at the first
       * buffer that hasn't expired yet. */
      if (!os_time_timeout(entry->start, entry->end, now))
         break;

      destroy_buffer_locked(entry);

      curr = next;
      next = curr->next;
   }
}

/**
 * Add a buffer to the cache. This is typically done when the buffer is
 * being released.
 */
void
pb_cache_add_buffer(struct pb_cache_entry *entry)
{
   struct pb_cache *mgr = entry->mgr;
   struct list_head *cache = &mgr->buckets[entry->bucket_index];
   unsigned i;

   pipe_mutex_lock(mgr->mutex);
   assert(!pipe_is_referenced(&entry->buffer->reference));

   for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++)
      release_expired_buffers_locked(&mgr->buckets[i]);

   /* Directly release any buffer that exceeds the limit. */
   if (mgr->cache_size + entry->buffer->size > mgr->max_cache_size) {
      entry->mgr->destroy_buffer(entry->buffer);
      pipe_mutex_unlock(mgr->mutex);
      return;
   }

   entry->start = os_time_get();
   entry->end = entry->start + mgr->usecs;
   LIST_ADDTAIL(&entry->head, cache);
   ++mgr->num_buffers;
   mgr->cache_size += entry->buffer->size;
   pipe_mutex_unlock(mgr->mutex);
}
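
/*
 * Usage sketch (illustrative, not part of this file): a driver typically
 * calls pb_cache_add_buffer from its buffer-destroy hook once the refcount
 * hits zero, so the buffer is parked in the cache instead of being freed.
 * "struct my_bo" and its "cache_entry" member are hypothetical driver-side
 * names assumed for the example.
 *
 *    static void my_bo_destroy(struct pb_buffer *buf)
 *    {
 *       struct my_bo *bo = (struct my_bo *)buf;
 *
 *       // Hand the buffer to the cache, which either keeps it in its
 *       // bucket or destroys it for good (e.g. when over the size limit).
 *       pb_cache_add_buffer(&bo->cache_entry);
 *    }
 */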

/**
 * \return  1  if compatible and can be reclaimed
 *          0  if incompatible
 *         -1  if compatible and can't be reclaimed
 */
static int
pb_cache_is_buffer_compat(struct pb_cache_entry *entry,
                          pb_size size, unsigned alignment, unsigned usage)
{
   struct pb_cache *mgr = entry->mgr;
   struct pb_buffer *buf = entry->buffer;

   if (!pb_check_usage(usage, buf->usage))
      return 0;

   /* be lenient with size */
   if (buf->size < size ||
       buf->size > (unsigned) (mgr->size_factor * size))
      return 0;

   if (usage & mgr->bypass_usage)
      return 0;

   if (!pb_check_alignment(alignment, buf->alignment))
      return 0;

   return mgr->can_reclaim(buf) ? 1 : -1;
}

/**
 * Find a compatible buffer in the cache, return it, and remove it
 * from the cache.
 */
struct pb_buffer *
pb_cache_reclaim_buffer(struct pb_cache *mgr, pb_size size,
                        unsigned alignment, unsigned usage,
                        unsigned bucket_index)
{
   struct pb_cache_entry *entry;
   struct pb_cache_entry *cur_entry;
   struct list_head *cur, *next;
   int64_t now;
   int ret = 0;
   struct list_head *cache = &mgr->buckets[bucket_index];

   pipe_mutex_lock(mgr->mutex);

   entry = NULL;
   cur = cache->next;
   next = cur->next;

   /* search in the expired buffers, freeing them in the process */
   now = os_time_get();
   while (cur != cache) {
      cur_entry = LIST_ENTRY(struct pb_cache_entry, cur, head);

      if (!entry && (ret = pb_cache_is_buffer_compat(cur_entry, size,
                                                     alignment, usage)) > 0)
         entry = cur_entry;
      else if (os_time_timeout(cur_entry->start, cur_entry->end, now))
         destroy_buffer_locked(cur_entry);
      else
         /* This buffer (and all hereafter) are still hot in cache */
         break;

      /* the buffer is busy (and probably all remaining ones too) */
      if (ret == -1)
         break;

      cur = next;
      next = cur->next;
   }

   /* keep searching in the hot buffers */
   if (!entry && ret != -1) {
      while (cur != cache) {
         cur_entry = LIST_ENTRY(struct pb_cache_entry, cur, head);
         ret = pb_cache_is_buffer_compat(cur_entry, size, alignment, usage);

         if (ret > 0) {
            entry = cur_entry;
            break;
         }
         if (ret == -1)
            break;
         /* no need to check the timeout here */
         cur = next;
         next = cur->next;
      }
   }

   /* found a compatible buffer, return it */
   if (entry) {
      struct pb_buffer *buf = entry->buffer;

      mgr->cache_size -= buf->size;
      LIST_DEL(&entry->head);
      --mgr->num_buffers;
      pipe_mutex_unlock(mgr->mutex);
      /* Increase refcount */
      pipe_reference_init(&buf->reference, 1);
      return buf;
   }

   pipe_mutex_unlock(mgr->mutex);
   return NULL;
}
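
/*
 * Usage sketch (illustrative, not part of this file): an allocation path
 * first tries to reclaim an idle cached buffer and only allocates a new one
 * on a cache miss. "screen->cache", "my_alloc_buffer", and the bucket index
 * 0 are hypothetical assumptions for the example.
 *
 *    struct pb_buffer *buf =
 *       pb_cache_reclaim_buffer(&screen->cache, size, alignment, usage, 0);
 *    if (!buf)
 *       buf = my_alloc_buffer(screen, size, alignment, usage);
 */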

/**
 * Empty the cache. Useful when there is not enough memory.
 */
void
pb_cache_release_all_buffers(struct pb_cache *mgr)
{
   struct list_head *curr, *next;
   struct pb_cache_entry *buf;
   unsigned i;

   pipe_mutex_lock(mgr->mutex);
   for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++) {
      struct list_head *cache = &mgr->buckets[i];

      curr = cache->next;
      next = curr->next;
      while (curr != cache) {
         buf = LIST_ENTRY(struct pb_cache_entry, curr, head);
         destroy_buffer_locked(buf);
         curr = next;
         next = curr->next;
      }
   }
   pipe_mutex_unlock(mgr->mutex);
}
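
/*
 * Usage sketch (illustrative, not part of this file): a common pattern is to
 * empty the cache when an allocation fails and then retry, since idle cached
 * buffers may be holding the memory the new allocation needs.
 * "my_alloc_buffer" and "screen->cache" are hypothetical names.
 *
 *    buf = my_alloc_buffer(screen, size, alignment, usage);
 *    if (!buf) {
 *       pb_cache_release_all_buffers(&screen->cache);
 *       buf = my_alloc_buffer(screen, size, alignment, usage);
 *    }
 */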

void
pb_cache_init_entry(struct pb_cache *mgr, struct pb_cache_entry *entry,
                    struct pb_buffer *buf, unsigned bucket_index)
{
   memset(entry, 0, sizeof(*entry));

   entry->buffer = buf;
   entry->mgr = mgr;
   entry->bucket_index = bucket_index;
}
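
/*
 * Usage sketch (illustrative, not part of this file): drivers embed a
 * pb_cache_entry in their buffer struct and initialize it right after
 * allocating the buffer, so the buffer can later be handed to
 * pb_cache_add_buffer. "struct my_bo" and "MY_BUCKET_VRAM" are hypothetical.
 *
 *    struct my_bo {
 *       struct pb_buffer base;
 *       struct pb_cache_entry cache_entry;
 *    };
 *
 *    pb_cache_init_entry(&screen->cache, &bo->cache_entry, &bo->base,
 *                        MY_BUCKET_VRAM);
 */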

/**
 * Initialize a caching buffer manager.
 *
 * @param mgr     The cache buffer manager
 * @param usecs   Unused buffers may be released from the cache after this
 *                time (in microseconds)
 * @param size_factor  Declare buffers that are size_factor times bigger than
 *                     the requested size as cache hits.
 * @param bypass_usage  Bitmask. If (requested usage & bypass_usage) != 0,
 *                      reclaim requests are rejected, i.e. such allocations
 *                      bypass the cache.
 * @param maximum_cache_size  Maximum size of all unused buffers the cache can
 *                            hold.
 * @param destroy_buffer  Function that destroys a buffer for good.
 * @param can_reclaim     Whether a buffer can be reclaimed (e.g. is not busy)
 */
void
pb_cache_init(struct pb_cache *mgr, uint usecs, float size_factor,
              unsigned bypass_usage, uint64_t maximum_cache_size,
              void (*destroy_buffer)(struct pb_buffer *buf),
              bool (*can_reclaim)(struct pb_buffer *buf))
{
   unsigned i;

   for (i = 0; i < ARRAY_SIZE(mgr->buckets); i++)
      LIST_INITHEAD(&mgr->buckets[i]);

   pipe_mutex_init(mgr->mutex);
   mgr->cache_size = 0;
   mgr->max_cache_size = maximum_cache_size;
   mgr->usecs = usecs;
   mgr->num_buffers = 0;
   mgr->bypass_usage = bypass_usage;
   mgr->size_factor = size_factor;
   mgr->destroy_buffer = destroy_buffer;
   mgr->can_reclaim = can_reclaim;
}
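
/*
 * Usage sketch (illustrative, not part of this file): a winsys would set up
 * the cache once at screen creation. All concrete values and the my_*
 * callbacks below are hypothetical examples, not recommendations.
 *
 *    pb_cache_init(&screen->cache,
 *                  1000000,            // evict buffers unused for ~1 second
 *                  2.0f,               // reuse buffers up to 2x the request
 *                  MY_USAGE_NO_CACHE,  // usage bits that bypass the cache
 *                  512 * 1024 * 1024,  // at most 512 MB of idle buffers
 *                  my_destroy_buffer, my_can_reclaim);
 */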

/**
 * Deinitialize the manager completely.
 */
void
pb_cache_deinit(struct pb_cache *mgr)
{
   pb_cache_release_all_buffers(mgr);
   pipe_mutex_destroy(mgr->mutex);
}
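
/*
 * Usage sketch (illustrative, not part of this file): teardown mirrors
 * pb_cache_init and usually happens at screen destruction; it frees every
 * cached buffer before destroying the mutex.
 *
 *    pb_cache_deinit(&screen->cache);
 */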