gallium/nouveau: fix printf warnings
[mesa.git] / src / gallium / drivers / nouveau / nouveau_mm.c
#include <inttypes.h>

#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "nouveau_screen.h"
#include "nouveau_mm.h"

#include "nouveau/nouveau_bo.h"

#define MM_MIN_ORDER 7
#define MM_MAX_ORDER 20

#define MM_NUM_BUCKETS (MM_MAX_ORDER - MM_MIN_ORDER + 1)

#define MM_MIN_SIZE (1 << MM_MIN_ORDER)
#define MM_MAX_SIZE (1 << MM_MAX_ORDER)

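/* Each bucket caches slabs for one power-of-two chunk size, from
 * MM_MIN_SIZE (128 bytes) up to MM_MAX_SIZE (1 MiB); slabs migrate between
 * the free/used/full lists as chunks are handed out and returned.
 */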
struct mm_bucket {
   struct list_head free;
   struct list_head used;
   struct list_head full;
   int num_free;
};

struct nouveau_mman {
   struct nouveau_device *dev;
   struct mm_bucket bucket[MM_NUM_BUCKETS];
   uint32_t storage_type;
   uint32_t domain;
   uint64_t allocated;
};

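/* A slab is a single bo carved into (1 << order)-byte chunks; bits[] is a
 * variable-length bitmap with a set bit for every chunk still free.
 */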
struct mm_slab {
   struct list_head head;
   struct nouveau_bo *bo;
   struct nouveau_mman *cache;
   int order;
   int count;
   int free;
   uint32_t bits[0];
};

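/* Grab the first free chunk: find the first set bit in the bitmap, clear
 * it and return its chunk index, or -1 if no chunk is free.
 */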
static int
mm_slab_alloc(struct mm_slab *slab)
{
   int i, n, b;

   if (slab->free == 0)
      return -1;

   for (i = 0; i < (slab->count + 31) / 32; ++i) {
      b = ffs(slab->bits[i]) - 1;
      if (b >= 0) {
         n = i * 32 + b;
         assert(n < slab->count);
         slab->free--;
         slab->bits[i] &= ~(1 << b);
         return n;
      }
   }
   return -1;
}

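/* Mark chunk i as free again by setting its bit in the bitmap. */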
static INLINE void
mm_slab_free(struct mm_slab *slab, int i)
{
   assert(i < slab->count);
   slab->bits[i / 32] |= 1 << (i % 32);
   slab->free++;
   assert(slab->free <= slab->count);
}

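/* Smallest order with (1 << order) >= size, i.e. ceil(log2(size)):
 * clz(size) ^ 31 is floor(log2(size)), plus one if size is not already a
 * power of two.
 */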
static INLINE int
mm_get_order(uint32_t size)
{
   int s = __builtin_clz(size) ^ 31;

   if (size > (1 << s))
      s += 1;
   return s;
}

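/* Orders above MM_MAX_ORDER have no bucket (the caller falls back to a
 * dedicated bo); orders below MM_MIN_ORDER share the smallest bucket.
 */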
static struct mm_bucket *
mm_bucket_by_order(struct nouveau_mman *cache, int order)
{
   if (order > MM_MAX_ORDER)
      return NULL;
   return &cache->bucket[MAX2(order, MM_MIN_ORDER) - MM_MIN_ORDER];
}

static struct mm_bucket *
mm_bucket_by_size(struct nouveau_mman *cache, unsigned size)
{
   return mm_bucket_by_order(cache, mm_get_order(size));
}

/* size of bo allocation for slab with chunks of (1 << chunk_order) bytes */
static INLINE uint32_t
mm_default_slab_size(unsigned chunk_order)
{
   static const int8_t slab_order[MM_MAX_ORDER - MM_MIN_ORDER + 1] =
   {
      12, 12, 13, 14, 14, 17, 17, 17, 17, 19, 19, 20, 21, 22
   };

   assert(chunk_order <= MM_MAX_ORDER && chunk_order >= MM_MIN_ORDER);

   return 1 << slab_order[chunk_order - MM_MIN_ORDER];
}

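/* Create a new slab for this chunk order: allocate the bo, mark every
 * chunk free in the bitmap and put the slab on its bucket's free list.
 */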
static int
mm_slab_new(struct nouveau_mman *cache, int chunk_order)
{
   struct mm_slab *slab;
   int words, ret;
   const uint32_t size = mm_default_slab_size(chunk_order);

   words = ((size >> chunk_order) + 31) / 32;
   assert(words);

   slab = MALLOC(sizeof(struct mm_slab) + words * 4);
   if (!slab)
      return PIPE_ERROR_OUT_OF_MEMORY;

   memset(&slab->bits[0], ~0, words * 4);

   slab->bo = NULL;
   ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
                             0, cache->storage_type, &slab->bo);
   if (ret) {
      FREE(slab);
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   LIST_INITHEAD(&slab->head);

   slab->cache = cache;
   slab->order = chunk_order;
   slab->count = slab->free = size >> chunk_order;

   LIST_ADD(&slab->head, &mm_bucket_by_order(cache, chunk_order)->free);

   cache->allocated += size;

   debug_printf("MM: new slab, total memory = %"PRIu64" KiB\n",
                cache->allocated / 1024);

   return PIPE_OK;
}

/* Allocate size bytes, returning the backing bo and the chunk's byte offset
 * within it.
 * @return token identifying the slab the chunk came from, or NULL if the
 * request was served by a dedicated bo (or allocation failed).
 */
struct nouveau_mm_allocation *
nouveau_mm_allocate(struct nouveau_mman *cache,
                    uint32_t size, struct nouveau_bo **bo, uint32_t *offset)
{
   struct mm_bucket *bucket;
   struct mm_slab *slab;
   struct nouveau_mm_allocation *alloc;
   int ret;

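   /* Requests above MM_MAX_SIZE have no bucket and are given a dedicated
    * bo instead of a slab chunk.
    */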
   bucket = mm_bucket_by_size(cache, size);
   if (!bucket) {
      ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
                                0, cache->storage_type, bo);
      if (ret)
         debug_printf("bo_new(%x, %x): %i\n", size, cache->storage_type, ret);

      *offset = 0;
      return NULL;
   }

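   /* Prefer a partially used slab; otherwise take a free one, creating it
    * first if necessary.
    */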
   if (!LIST_IS_EMPTY(&bucket->used)) {
      slab = LIST_ENTRY(struct mm_slab, bucket->used.next, head);
   } else {
      if (LIST_IS_EMPTY(&bucket->free)) {
         ret = mm_slab_new(cache, MAX2(mm_get_order(size), MM_MIN_ORDER));
         if (ret) /* bucket->free would still be empty */
            return NULL;
      }
      slab = LIST_ENTRY(struct mm_slab, bucket->free.next, head);

      LIST_DEL(&slab->head);
      LIST_ADD(&slab->head, &bucket->used);
   }

   *offset = mm_slab_alloc(slab) << slab->order;

   alloc = MALLOC_STRUCT(nouveau_mm_allocation);
   if (!alloc)
      return NULL;

   nouveau_bo_ref(slab->bo, bo);

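   /* The chunk just taken was the slab's last free one: park the slab on
    * the full list.
    */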
   if (slab->free == 0) {
      LIST_DEL(&slab->head);
      LIST_ADD(&slab->head, &bucket->full);
   }

   alloc->next = NULL;
   alloc->offset = *offset;
   alloc->priv = (void *)slab;

   return alloc;
}

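/* Return a previously allocated chunk to its slab. */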
void
nouveau_mm_free(struct nouveau_mm_allocation *alloc)
{
   struct mm_slab *slab = (struct mm_slab *)alloc->priv;
   struct mm_bucket *bucket = mm_bucket_by_order(slab->cache, slab->order);

   mm_slab_free(slab, alloc->offset >> slab->order);

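   /* free == 1 means the slab was full until now: move it back to a usable
    * list (a single-chunk slab is entirely free again and goes to free).
    */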
   if (slab->free == 1) {
      LIST_DEL(&slab->head);

      if (slab->count > 1)
         LIST_ADDTAIL(&slab->head, &bucket->used);
      else
         LIST_ADDTAIL(&slab->head, &bucket->free);
   }

   FREE(alloc);
}

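/* Callback form of nouveau_mm_free, so a release can be deferred, e.g.
 * run once a fence signals that the GPU is done with the chunk.
 */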
void
nouveau_mm_free_work(void *data)
{
   nouveau_mm_free(data);
}

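/* Create a suballocator for bos in the given memory domain; storage_type
 * is passed through to nouveau_bo_new_tile() for every slab.
 */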
struct nouveau_mman *
nouveau_mm_create(struct nouveau_device *dev, uint32_t domain,
                  uint32_t storage_type)
{
   struct nouveau_mman *cache = MALLOC_STRUCT(nouveau_mman);
   int i;

   if (!cache)
      return NULL;

   cache->dev = dev;
   cache->domain = domain;
   cache->storage_type = storage_type;
   cache->allocated = 0;

   for (i = 0; i < MM_NUM_BUCKETS; ++i) {
      LIST_INITHEAD(&cache->bucket[i].free);
      LIST_INITHEAD(&cache->bucket[i].used);
      LIST_INITHEAD(&cache->bucket[i].full);
   }

   return cache;
}

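/* Release every slab on the given list along with its backing bo. */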
static INLINE void
nouveau_mm_free_slabs(struct list_head *head)
{
   struct mm_slab *slab, *next;

   LIST_FOR_EACH_ENTRY_SAFE(slab, next, head, head) {
      LIST_DEL(&slab->head);
      nouveau_bo_ref(NULL, &slab->bo);
      FREE(slab);
   }
}

void
nouveau_mm_destroy(struct nouveau_mman *cache)
{
   int i;

   if (!cache)
      return;

   for (i = 0; i < MM_NUM_BUCKETS; ++i) {
      if (!LIST_IS_EMPTY(&cache->bucket[i].used) ||
          !LIST_IS_EMPTY(&cache->bucket[i].full))
         debug_printf("WARNING: destroying GPU memory cache "
                      "with some buffers still in use\n");

      nouveau_mm_free_slabs(&cache->bucket[i].free);
      nouveau_mm_free_slabs(&cache->bucket[i].used);
      nouveau_mm_free_slabs(&cache->bucket[i].full);
   }

   FREE(cache);
}