/*
 * Copyright (C) 2016 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Christian Gmeiner <christian.gmeiner@gmail.com>
 */
27 #include "etnaviv_priv.h"
28 #include "etnaviv_drmif.h"
30 void _etna_bo_del(struct etna_bo
*bo
);
31 extern pthread_mutex_t etna_drm_table_lock
;
33 static void add_bucket(struct etna_bo_cache
*cache
, int size
)
35 unsigned i
= cache
->num_buckets
;
37 assert(i
< ARRAY_SIZE(cache
->cache_bucket
));
39 list_inithead(&cache
->cache_bucket
[i
].list
);
40 cache
->cache_bucket
[i
].size
= size
;
/* Populate the cache with its fixed set of bucket sizes: one page,
 * two pages, three pages, then four steps per power of two up to the
 * 64 MiB cache_max_size.
 */
void etna_bo_cache_init(struct etna_bo_cache *cache)
{
	unsigned long size, cache_max_size = 64 * 1024 * 1024;

	/* OK, so power of two buckets was too wasteful of memory.
	 * Give 3 other sizes between each power of two, to hopefully
	 * cover things accurately enough.  (The alternative is
	 * probably to just go for exact matching of sizes, and assume
	 * that for things like composited window resize the tiled
	 * width/height alignment and rounding of sizes to pages will
	 * get us useful cache hit rates anyway)
	 */
	add_bucket(cache, 4096);
	add_bucket(cache, 4096 * 2);
	add_bucket(cache, 4096 * 3);

	/* Initialize the linked lists for BO reuse cache. */
	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
		add_bucket(cache, size);
		add_bucket(cache, size + size * 1 / 4);
		add_bucket(cache, size + size * 2 / 4);
		add_bucket(cache, size + size * 3 / 4);
	}
}
69 /* Frees older cached buffers. Called under etna_drm_table_lock */
70 void etna_bo_cache_cleanup(struct etna_bo_cache
*cache
, time_t time
)
74 if (cache
->time
== time
)
77 for (i
= 0; i
< cache
->num_buckets
; i
++) {
78 struct etna_bo_bucket
*bucket
= &cache
->cache_bucket
[i
];
81 while (!LIST_IS_EMPTY(&bucket
->list
)) {
82 bo
= LIST_ENTRY(struct etna_bo
, bucket
->list
.next
, list
);
84 /* keep things in cache for at least 1 second: */
85 if (time
&& ((time
- bo
->free_time
) <= 1))
96 static struct etna_bo_bucket
*get_bucket(struct etna_bo_cache
*cache
, uint32_t size
)
100 /* hmm, this is what intel does, but I suppose we could calculate our
101 * way to the correct bucket size rather than looping..
103 for (i
= 0; i
< cache
->num_buckets
; i
++) {
104 struct etna_bo_bucket
*bucket
= &cache
->cache_bucket
[i
];
105 if (bucket
->size
>= size
) {
113 static int is_idle(struct etna_bo
*bo
)
115 return etna_bo_cpu_prep(bo
,
117 DRM_ETNA_PREP_WRITE
|
118 DRM_ETNA_PREP_NOSYNC
) == 0;
121 static struct etna_bo
*find_in_bucket(struct etna_bo_bucket
*bucket
, uint32_t flags
)
123 struct etna_bo
*bo
= NULL
, *tmp
;
125 pthread_mutex_lock(&etna_drm_table_lock
);
127 if (LIST_IS_EMPTY(&bucket
->list
))
130 LIST_FOR_EACH_ENTRY_SAFE(bo
, tmp
, &bucket
->list
, list
) {
131 /* skip BOs with different flags */
132 if (bo
->flags
!= flags
)
135 /* check if the first BO with matching flags is idle */
137 list_delinit(&bo
->list
);
141 /* If the oldest BO is still busy, don't try younger ones */
145 /* There was no matching buffer found */
149 pthread_mutex_unlock(&etna_drm_table_lock
);
154 /* allocate a new (un-tiled) buffer object
156 * NOTE: size is potentially rounded up to bucket size
158 struct etna_bo
*etna_bo_cache_alloc(struct etna_bo_cache
*cache
, uint32_t *size
,
162 struct etna_bo_bucket
*bucket
;
164 *size
= ALIGN(*size
, 4096);
165 bucket
= get_bucket(cache
, *size
);
167 /* see if we can be green and recycle: */
169 *size
= bucket
->size
;
170 bo
= find_in_bucket(bucket
, flags
);
172 p_atomic_set(&bo
->refcnt
, 1);
173 etna_device_ref(bo
->dev
);
181 int etna_bo_cache_free(struct etna_bo_cache
*cache
, struct etna_bo
*bo
)
183 struct etna_bo_bucket
*bucket
= get_bucket(cache
, bo
->size
);
185 /* see if we can be green and recycle: */
187 struct timespec time
;
189 clock_gettime(CLOCK_MONOTONIC
, &time
);
191 bo
->free_time
= time
.tv_sec
;
192 list_addtail(&bo
->list
, &bucket
->list
);
193 etna_bo_cache_cleanup(cache
, time
.tv_sec
);
195 /* bo's in the bucket cache don't have a ref and
196 * don't hold a ref to the dev:
198 etna_device_del_locked(bo
->dev
);