[mesa.git] src/gallium/auxiliary/pipebuffer/pb_bufmgr_slab.c
/**************************************************************************
 *
 * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/

/**
 * @file
 * S-lab pool implementation.
 *
 * A slab manager carves many equally sized buffers out of larger "slab"
 * buffers obtained from an underlying provider manager, so that small
 * allocations can share a single provider allocation.
 *
 * @author Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
 * @author Jose Fonseca <jrfonseca@tungstengraphics.com>
 */
36
37 #include "pipe/p_compiler.h"
38 #include "pipe/p_error.h"
39 #include "pipe/p_debug.h"
40 #include "pipe/p_thread.h"
41 #include "pipe/p_defines.h"
42 #include "pipe/p_util.h"
43 #include "util/u_double_list.h"
44 #include "util/u_time.h"
45
46 #include "pb_buffer.h"
47 #include "pb_bufmgr.h"
48
49
struct pb_slab;


struct pb_slab_buffer
{
   struct pb_buffer base;

   struct pb_slab *slab;         /**< Slab this buffer was carved from. */
   struct list_head head;        /**< Member of the slab's free/in-use list. */
   unsigned mapCount;            /**< Number of outstanding mappings. */
   size_t start;                 /**< Byte offset within the slab. */
   _glthread_Cond event;         /**< Signalled when mapCount drops to zero. */
};

struct pb_slab
{
   struct list_head head;        /**< Member of the manager's slab list. */
   struct list_head freeBuffers; /**< Buffers not currently allocated. */
   size_t numBuffers;            /**< Total buffers carved from this slab. */
   size_t numFree;               /**< Buffers currently on freeBuffers. */
   struct pb_slab_buffer *buffers;
   struct pb_slab_manager *mgr;

   struct pb_buffer *bo;         /**< Underlying provider buffer. */
   void *virtual;                /**< CPU address obtained at slab creation. */
};

/**
 * Manager of equally sized buffers, suballocated from larger slabs
 * obtained from the provider.
 */
struct pb_slab_manager
{
   struct pb_manager base;

   struct pb_manager *provider;  /**< Where slab storage comes from. */
   size_t bufSize;               /**< Size of each suballocated buffer. */
   size_t slabSize;              /**< Size of each provider slab. */
   struct pb_desc desc;          /**< Allocation descriptor for slabs. */

   struct list_head slabs;       /**< Slabs with free buffers available. */
   struct list_head freeSlabs;   /**< Slabs with no buffers in use. */

   _glthread_Mutex mutex;
};

/**
 * Wrapper around several slab managers, one per power-of-two buffer size
 * bucket.
 *
 * The data of this structure remains constant after
 * initialization and thus needs no mutex protection.
 */
struct pb_slab_range_manager
{
   struct pb_manager base;

   struct pb_manager *provider;
   size_t minBufSize;
   size_t maxBufSize;
   struct pb_desc desc;

   unsigned numBuckets;
   size_t *bucketSizes;          /**< Currently unused. */
   struct pb_manager **buckets;
};
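
/*
 * Illustrative example (added commentary, not from the original source):
 * with minBufSize = 4096 and maxBufSize = 65536,
 * pb_slab_range_manager_create below sets up five buckets of
 * power-of-two sizes
 *
 *    4096, 8192, 16384, 32768, 65536
 *
 * and a request of e.g. 5000 bytes is served from the 8192 bucket.
 * Requests larger than maxBufSize bypass the buckets and go straight
 * to the provider.
 */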


static INLINE struct pb_slab_buffer *
pb_slab_buffer(struct pb_buffer *buf)
{
   assert(buf);
   return (struct pb_slab_buffer *)buf;
}


static INLINE struct pb_slab_manager *
pb_slab_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_slab_manager *)mgr;
}


static INLINE struct pb_slab_range_manager *
pb_slab_range_manager(struct pb_manager *mgr)
{
   assert(mgr);
   return (struct pb_slab_range_manager *)mgr;
}


/**
 * Delete a buffer from the slab delayed list and put
 * it on the slab free list.
 */
static void
pb_slab_buffer_destroy(struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   struct pb_slab *slab = buf->slab;
   struct pb_slab_manager *mgr = slab->mgr;
   struct list_head *list = &buf->head;

   _glthread_LOCK_MUTEX(mgr->mutex);

   assert(buf->base.base.refcount == 0);

   buf->mapCount = 0;

   LIST_DEL(list);
   LIST_ADDTAIL(list, &slab->freeBuffers);
   slab->numFree++;

   /* If the slab was full (and hence unlinked from the manager's list),
    * it has free buffers again: put it back. */
   if (slab->head.next == &slab->head)
      LIST_ADDTAIL(&slab->head, &mgr->slabs);

   /* A completely free slab is moved to the freeSlabs list. */
   if (slab->numFree == slab->numBuffers) {
      list = &slab->head;
      LIST_DEL(list);
      LIST_ADDTAIL(list, &mgr->freeSlabs);
   }

   /* Reclaim completely free slabs, returning their storage to the
    * provider. */
   if (mgr->slabs.next == &mgr->slabs ||
       slab->numFree != slab->numBuffers) {
      struct list_head *next;

      for (list = mgr->freeSlabs.next, next = list->next;
           list != &mgr->freeSlabs;
           list = next, next = list->next) {
         slab = LIST_ENTRY(struct pb_slab, list, head);
         LIST_DELINIT(list);
         pb_reference(&slab->bo, NULL);
         FREE(slab->buffers);
         FREE(slab);
      }
   }

   _glthread_UNLOCK_MUTEX(mgr->mutex);
}


static void *
pb_slab_buffer_map(struct pb_buffer *_buf,
                   unsigned flags)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

   /* The slab's CPU address was obtained at slab creation, so mapping a
    * suballocated buffer is just pointer arithmetic. */
   ++buf->mapCount;
   return (void *) ((uint8_t *) buf->slab->virtual + buf->start);
}


static void
pb_slab_buffer_unmap(struct pb_buffer *_buf)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);

   --buf->mapCount;
   /* Wake up any thread waiting for all mappings to be released. */
   if (buf->mapCount == 0)
      _glthread_COND_BROADCAST(buf->event);
}


static void
pb_slab_buffer_get_base_buffer(struct pb_buffer *_buf,
                               struct pb_buffer **base_buf,
                               unsigned *offset)
{
   struct pb_slab_buffer *buf = pb_slab_buffer(_buf);
   pb_get_base_buffer(buf->slab->bo, base_buf, offset);
   *offset += buf->start;
}


static const struct pb_vtbl
pb_slab_buffer_vtbl = {
   pb_slab_buffer_destroy,
   pb_slab_buffer_map,
   pb_slab_buffer_unmap,
   pb_slab_buffer_get_base_buffer
};
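
/*
 * Note (added commentary): generic pb_buffer entry points such as pb_map()
 * and pb_unmap() dispatch through this vtbl, which is how the functions
 * above get invoked on a slab-suballocated buffer.
 */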


/**
 * Create a new slab: allocate a buffer from the provider, map it, and
 * carve it into bufSize pieces, all of which start out on the free list.
 */
static enum pipe_error
pb_slab_create(struct pb_slab_manager *mgr)
{
   struct pb_slab *slab;
   struct pb_slab_buffer *buf;
   unsigned numBuffers;
   unsigned i;
   enum pipe_error ret;

   slab = CALLOC_STRUCT(pb_slab);
   if (!slab)
      return PIPE_ERROR_OUT_OF_MEMORY;

   /*
    * FIXME: We should perhaps allow some variation in slabsize in order
    * to efficiently reuse slabs.
    */

   slab->bo = mgr->provider->create_buffer(mgr->provider, mgr->slabSize, &mgr->desc);
   if(!slab->bo) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err0;
   }

   /* Note that the CPU address is retained after unmapping below; this
    * relies on the provider's mapping remaining valid for the slab's
    * lifetime. */
   slab->virtual = pb_map(slab->bo,
                          PIPE_BUFFER_USAGE_CPU_READ |
                          PIPE_BUFFER_USAGE_CPU_WRITE);
   if(!slab->virtual) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }

   pb_unmap(slab->bo);

   numBuffers = slab->bo->base.size / mgr->bufSize;

   slab->buffers = CALLOC(numBuffers, sizeof(*slab->buffers));
   if (!slab->buffers) {
      ret = PIPE_ERROR_OUT_OF_MEMORY;
      goto out_err1;
   }

   LIST_INITHEAD(&slab->head);
   LIST_INITHEAD(&slab->freeBuffers);
   slab->numBuffers = numBuffers;
   slab->numFree = 0;
   slab->mgr = mgr;

   buf = slab->buffers;
   for (i = 0; i < numBuffers; ++i) {
      buf->base.base.refcount = 0;
      buf->base.base.size = mgr->bufSize;
      buf->base.base.alignment = 0;
      buf->base.base.usage = 0;
      buf->base.vtbl = &pb_slab_buffer_vtbl;
      buf->slab = slab;
      buf->start = i * mgr->bufSize;
      buf->mapCount = 0;
      _glthread_INIT_COND(buf->event);
      LIST_ADDTAIL(&buf->head, &slab->freeBuffers);
      slab->numFree++;
      buf++;
   }

   LIST_ADDTAIL(&slab->head, &mgr->slabs);

   return PIPE_OK;

out_err1:
   pb_reference(&slab->bo, NULL);
out_err0:
   FREE(slab);
   return ret;
}


static struct pb_buffer *
pb_slab_manager_create_buffer(struct pb_manager *_mgr,
                              size_t size,
                              const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);
   struct pb_slab_buffer *buf;
   struct pb_slab *slab;
   struct list_head *list;

   /* check size */
   assert(size <= mgr->bufSize);
   if(size > mgr->bufSize)
      return NULL;

   /* check if we can provide the requested alignment */
   assert(pb_check_alignment(desc->alignment, mgr->desc.alignment));
   if(!pb_check_alignment(desc->alignment, mgr->desc.alignment))
      return NULL;
   assert(pb_check_alignment(desc->alignment, mgr->bufSize));
   if(!pb_check_alignment(desc->alignment, mgr->bufSize))
      return NULL;

   /* XXX: check for compatible buffer usage too? */

   _glthread_LOCK_MUTEX(mgr->mutex);

   /* Create a new slab if no partially free slab is available. */
   if (mgr->slabs.next == &mgr->slabs) {
      (void) pb_slab_create(mgr);
      if (mgr->slabs.next == &mgr->slabs) {
         _glthread_UNLOCK_MUTEX(mgr->mutex);
         return NULL;
      }
   }

   /* Take a buffer from the first slab; unlink the slab once it is full. */
   list = mgr->slabs.next;
   slab = LIST_ENTRY(struct pb_slab, list, head);
   if (--slab->numFree == 0)
      LIST_DELINIT(list);

   list = slab->freeBuffers.next;
   LIST_DELINIT(list);

   _glthread_UNLOCK_MUTEX(mgr->mutex);
   buf = LIST_ENTRY(struct pb_slab_buffer, list, head);

   ++buf->base.base.refcount;
   buf->base.base.alignment = desc->alignment;
   buf->base.base.usage = desc->usage;

   return &buf->base;
}


static void
pb_slab_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_manager *mgr = pb_slab_manager(_mgr);

   /* TODO: cleanup all allocated buffers */
   FREE(mgr);
}


struct pb_manager *
pb_slab_manager_create(struct pb_manager *provider,
                       size_t bufSize,
                       size_t slabSize,
                       const struct pb_desc *desc)
{
   struct pb_slab_manager *mgr;

   mgr = CALLOC_STRUCT(pb_slab_manager);
   if (!mgr)
      return NULL;

   mgr->base.destroy = pb_slab_manager_destroy;
   mgr->base.create_buffer = pb_slab_manager_create_buffer;

   mgr->provider = provider;
   mgr->bufSize = bufSize;
   mgr->slabSize = slabSize;
   mgr->desc = *desc;

   LIST_INITHEAD(&mgr->slabs);
   LIST_INITHEAD(&mgr->freeSlabs);

   _glthread_INIT_MUTEX(mgr->mutex);

   return &mgr->base;
}
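
/*
 * Illustrative usage sketch (added commentary, not part of the original
 * file; "provider" stands for any existing pb_manager, e.g. the winsys
 * buffer manager):
 *
 *    struct pb_desc desc;
 *    struct pb_manager *slab_mgr;
 *    struct pb_buffer *buf;
 *
 *    desc.alignment = 64;
 *    desc.usage = PIPE_BUFFER_USAGE_CPU_READ | PIPE_BUFFER_USAGE_CPU_WRITE;
 *
 *    // 1024-byte buffers suballocated from 64 KiB slabs
 *    slab_mgr = pb_slab_manager_create(provider, 1024, 64*1024, &desc);
 *    buf = slab_mgr->create_buffer(slab_mgr, 1024, &desc);
 *    ...
 *    pb_reference(&buf, NULL);   // returns the buffer to its slab
 *    slab_mgr->destroy(slab_mgr);
 */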


static struct pb_buffer *
pb_slab_range_manager_create_buffer(struct pb_manager *_mgr,
                                    size_t size,
                                    const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   size_t bufSize;
   unsigned i;

   /* Find the smallest power-of-two bucket that fits the request. */
   bufSize = mgr->minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      if(bufSize >= size)
         return mgr->buckets[i]->create_buffer(mgr->buckets[i], size, desc);
      bufSize *= 2;
   }

   /* Fall back to allocate a buffer object directly from the provider. */
   return mgr->provider->create_buffer(mgr->provider, size, desc);
}


static void
pb_slab_range_manager_destroy(struct pb_manager *_mgr)
{
   struct pb_slab_range_manager *mgr = pb_slab_range_manager(_mgr);
   unsigned i;

   for (i = 0; i < mgr->numBuckets; ++i)
      mgr->buckets[i]->destroy(mgr->buckets[i]);
   FREE(mgr->buckets);
   FREE(mgr->bucketSizes);
   FREE(mgr);
}


struct pb_manager *
pb_slab_range_manager_create(struct pb_manager *provider,
                             size_t minBufSize,
                             size_t maxBufSize,
                             size_t slabSize,
                             const struct pb_desc *desc)
{
   struct pb_slab_range_manager *mgr;
   size_t bufSize;
   unsigned i;

   mgr = CALLOC_STRUCT(pb_slab_range_manager);
   if (!mgr)
      goto out_err0;

   mgr->base.destroy = pb_slab_range_manager_destroy;
   mgr->base.create_buffer = pb_slab_range_manager_create_buffer;

   mgr->provider = provider;
   mgr->minBufSize = minBufSize;
   mgr->maxBufSize = maxBufSize;

   /* One bucket per power-of-two size between minBufSize and maxBufSize. */
   mgr->numBuckets = 1;
   bufSize = minBufSize;
   while(bufSize < maxBufSize) {
      bufSize *= 2;
      ++mgr->numBuckets;
   }

   mgr->buckets = CALLOC(mgr->numBuckets, sizeof(*mgr->buckets));
   if (!mgr->buckets)
      goto out_err1;

   bufSize = minBufSize;
   for (i = 0; i < mgr->numBuckets; ++i) {
      mgr->buckets[i] = pb_slab_manager_create(provider, bufSize, slabSize, desc);
      if(!mgr->buckets[i])
         goto out_err2;
      bufSize *= 2;
   }

   return &mgr->base;

out_err2:
   for (i = 0; i < mgr->numBuckets; ++i)
      if(mgr->buckets[i])
         mgr->buckets[i]->destroy(mgr->buckets[i]);
   FREE(mgr->buckets);
out_err1:
   FREE(mgr);
out_err0:
   return NULL;
}
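
/*
 * Illustrative usage sketch (added commentary, not part of the original
 * file; "provider" and "desc" are set up as in the earlier example):
 *
 *    // Buckets for 4 KiB .. 64 KiB buffers, each using 1 MiB slabs;
 *    // larger requests go straight to the provider.
 *    struct pb_manager *range_mgr =
 *       pb_slab_range_manager_create(provider, 4*1024, 64*1024,
 *                                    1024*1024, &desc);
 *    struct pb_buffer *buf =
 *       range_mgr->create_buffer(range_mgr, 5000, &desc);  // 8 KiB bucket
 */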