nouveau: use PRIu64 for printing uint64_t
[mesa.git] src/gallium/drivers/nouveau/nouveau_mm.c
#include <inttypes.h>

#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "nouveau_screen.h"
#include "nouveau_mm.h"

#include "nouveau/nouveau_bo.h"

#define MM_MIN_ORDER 7
#define MM_MAX_ORDER 20

#define MM_NUM_BUCKETS (MM_MAX_ORDER - MM_MIN_ORDER + 1)

#define MM_MIN_SIZE (1 << MM_MIN_ORDER)
#define MM_MAX_SIZE (1 << MM_MAX_ORDER)

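/* Suballocator state: chunk sizes are powers of two between MM_MIN_SIZE and
 * MM_MAX_SIZE, grouped into buckets by order. Each bucket keeps its slabs on
 * three lists: free (all chunks available), used (partially allocated) and
 * full (no chunks left).
 */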
struct mm_bucket {
   struct list_head free;
   struct list_head used;
   struct list_head full;
   int num_free;
};

struct nouveau_mman {
   struct nouveau_device *dev;
   struct mm_bucket bucket[MM_NUM_BUCKETS];
   uint32_t storage_type;
   uint32_t domain;
   uint64_t allocated;
};

struct mm_slab {
   struct list_head head;
   struct nouveau_bo *bo;
   struct nouveau_mman *cache;
   int order;
   int count;
   int free;
   uint32_t bits[0];
};

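/* Claim the first free chunk in the slab's bitmap.
 * Returns the chunk index, or -1 if the slab is full.
 */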
static int
mm_slab_alloc(struct mm_slab *slab)
{
   int i, n, b;

   if (slab->free == 0)
      return -1;

   for (i = 0; i < (slab->count + 31) / 32; ++i) {
      b = ffs(slab->bits[i]) - 1;
      if (b >= 0) {
         n = i * 32 + b;
         assert(n < slab->count);
         slab->free--;
         slab->bits[i] &= ~(1 << b);
         return n;
      }
   }
   return -1;
}

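/* Mark chunk i in the slab's bitmap as free again. */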
static INLINE void
mm_slab_free(struct mm_slab *slab, int i)
{
   assert(i < slab->count);
   slab->bits[i / 32] |= 1 << (i % 32);
   slab->free++;
   assert(slab->free <= slab->count);
}

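/* Smallest order such that (1 << order) >= size, i.e. ceil(log2(size)). */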
static INLINE int
mm_get_order(uint32_t size)
{
   int s = __builtin_clz(size) ^ 31;

   if (size > (1 << s))
      s += 1;
   return s;
}

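/* Bucket for the given chunk order; orders below MM_MIN_ORDER share the
 * smallest bucket, orders above MM_MAX_ORDER are not cached (NULL).
 */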
static struct mm_bucket *
mm_bucket_by_order(struct nouveau_mman *cache, int order)
{
   if (order > MM_MAX_ORDER)
      return NULL;
   return &cache->bucket[MAX2(order, MM_MIN_ORDER) - MM_MIN_ORDER];
}

static struct mm_bucket *
mm_bucket_by_size(struct nouveau_mman *cache, unsigned size)
{
   return mm_bucket_by_order(cache, mm_get_order(size));
}

/* size of bo allocation for slab with chunks of (1 << chunk_order) bytes */
static INLINE uint32_t
mm_default_slab_size(unsigned chunk_order)
{
   static const int8_t slab_order[MM_MAX_ORDER - MM_MIN_ORDER + 1] =
   {
      12, 12, 13, 14, 14, 17, 17, 17, 17, 19, 19, 20, 21, 22
   };

   assert(chunk_order <= MM_MAX_ORDER && chunk_order >= MM_MIN_ORDER);

   return 1 << slab_order[chunk_order - MM_MIN_ORDER];
}

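/* Allocate a new slab for the given chunk order: one backing bo of
 * mm_default_slab_size() bytes plus a bitmap with one bit per chunk,
 * initially all free. The slab is added to its bucket's free list.
 */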
static int
mm_slab_new(struct nouveau_mman *cache, int chunk_order)
{
   struct mm_slab *slab;
   int words, ret;
   const uint32_t size = mm_default_slab_size(chunk_order);

   words = ((size >> chunk_order) + 31) / 32;
   assert(words);

   slab = MALLOC(sizeof(struct mm_slab) + words * 4);
   if (!slab)
      return PIPE_ERROR_OUT_OF_MEMORY;

   memset(&slab->bits[0], ~0, words * 4);

   slab->bo = NULL;
   ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
                             0, cache->storage_type, &slab->bo);
   if (ret) {
      FREE(slab);
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   LIST_INITHEAD(&slab->head);

   slab->cache = cache;
   slab->order = chunk_order;
   slab->count = slab->free = size >> chunk_order;

   LIST_ADD(&slab->head, &mm_bucket_by_order(cache, chunk_order)->free);

   cache->allocated += size;

   debug_printf("MM: new slab, total memory = %"PRIu64" KiB\n",
                cache->allocated / 1024);

   return PIPE_OK;
}

/* @return token to identify slab or NULL if we just allocated a new bo */
struct nouveau_mm_allocation *
nouveau_mm_allocate(struct nouveau_mman *cache,
                    uint32_t size, struct nouveau_bo **bo, uint32_t *offset)
{
   struct mm_bucket *bucket;
   struct mm_slab *slab;
   struct nouveau_mm_allocation *alloc;
   int ret;

   bucket = mm_bucket_by_size(cache, size);
   if (!bucket) {
      ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
                                0, cache->storage_type, bo);
      if (ret)
         debug_printf("bo_new(%x, %x): %i\n", size, cache->storage_type, ret);

      *offset = 0;
      return NULL;
   }

   if (!LIST_IS_EMPTY(&bucket->used)) {
      slab = LIST_ENTRY(struct mm_slab, bucket->used.next, head);
   } else {
      if (LIST_IS_EMPTY(&bucket->free)) {
         mm_slab_new(cache, MAX2(mm_get_order(size), MM_MIN_ORDER));
      }
      slab = LIST_ENTRY(struct mm_slab, bucket->free.next, head);

      LIST_DEL(&slab->head);
      LIST_ADD(&slab->head, &bucket->used);
   }

   *offset = mm_slab_alloc(slab) << slab->order;

   alloc = MALLOC_STRUCT(nouveau_mm_allocation);
   if (!alloc)
      return NULL;

   nouveau_bo_ref(slab->bo, bo);

   if (slab->free == 0) {
      LIST_DEL(&slab->head);
      LIST_ADD(&slab->head, &bucket->full);
   }

   alloc->next = NULL;
   alloc->offset = *offset;
   alloc->priv = (void *)slab;

   return alloc;
}

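/* Return a suballocation to its slab, and move the slab back to the bucket's
 * used or free list if it was full or becomes entirely free.
 */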
void
nouveau_mm_free(struct nouveau_mm_allocation *alloc)
{
   struct mm_slab *slab = (struct mm_slab *)alloc->priv;
   struct mm_bucket *bucket = mm_bucket_by_order(slab->cache, slab->order);

   mm_slab_free(slab, alloc->offset >> slab->order);

   if (slab->free == slab->count) {
      LIST_DEL(&slab->head);
      LIST_ADDTAIL(&slab->head, &bucket->free);
   } else
   if (slab->free == 1) {
      LIST_DEL(&slab->head);
      LIST_ADDTAIL(&slab->head, &bucket->used);
   }

   FREE(alloc);
}

void
nouveau_mm_free_work(void *data)
{
   nouveau_mm_free(data);
}

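/* Create a new cache for the given memory domain and storage type, with all
 * buckets initially empty.
 */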
struct nouveau_mman *
nouveau_mm_create(struct nouveau_device *dev, uint32_t domain,
                  uint32_t storage_type)
{
   struct nouveau_mman *cache = MALLOC_STRUCT(nouveau_mman);
   int i;

   if (!cache)
      return NULL;

   cache->dev = dev;
   cache->domain = domain;
   cache->storage_type = storage_type;
   cache->allocated = 0;

   for (i = 0; i < MM_NUM_BUCKETS; ++i) {
      LIST_INITHEAD(&cache->bucket[i].free);
      LIST_INITHEAD(&cache->bucket[i].used);
      LIST_INITHEAD(&cache->bucket[i].full);
   }

   return cache;
}

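/* Release all slabs on the given list, dropping their bo references. */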
static INLINE void
nouveau_mm_free_slabs(struct list_head *head)
{
   struct mm_slab *slab, *next;

   LIST_FOR_EACH_ENTRY_SAFE(slab, next, head, head) {
      LIST_DEL(&slab->head);
      nouveau_bo_ref(NULL, &slab->bo);
      FREE(slab);
   }
}

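/* Destroy the cache and all of its slabs; warns if any suballocations are
 * still in use.
 */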
void
nouveau_mm_destroy(struct nouveau_mman *cache)
{
   int i;

   if (!cache)
      return;

   for (i = 0; i < MM_NUM_BUCKETS; ++i) {
      if (!LIST_IS_EMPTY(&cache->bucket[i].used) ||
          !LIST_IS_EMPTY(&cache->bucket[i].full))
         debug_printf("WARNING: destroying GPU memory cache "
                      "with some buffers still in use\n");

      nouveau_mm_free_slabs(&cache->bucket[i].free);
      nouveau_mm_free_slabs(&cache->bucket[i].used);
      nouveau_mm_free_slabs(&cache->bucket[i].full);
   }

   FREE(cache);
}