nouveau: hide some debugging messages behind environment variable
[mesa.git] src/gallium/drivers/nouveau/nouveau_mm.c

#include <inttypes.h>

#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "nouveau_screen.h"
#include "nouveau_mm.h"

#include "nouveau/nouveau_bo.h"

#define MM_MIN_ORDER 7
#define MM_MAX_ORDER 20

#define MM_NUM_BUCKETS (MM_MAX_ORDER - MM_MIN_ORDER + 1)

#define MM_MIN_SIZE (1 << MM_MIN_ORDER)
#define MM_MAX_SIZE (1 << MM_MAX_ORDER)
struct mm_bucket {
   struct list_head free;
   struct list_head used;
   struct list_head full;
   int num_free;
};

struct nouveau_mman {
   struct nouveau_device *dev;
   struct mm_bucket bucket[MM_NUM_BUCKETS];
   uint32_t storage_type;
   uint32_t domain;
   uint64_t allocated;
};

struct mm_slab {
   struct list_head head;
   struct nouveau_bo *bo;
   struct nouveau_mman *cache;
   int order; /* log2 of the chunk size */
   int count; /* total number of chunks in this slab */
   int free;  /* number of chunks currently free */
   uint32_t bits[0]; /* free-chunk bitmask, bit set = chunk free */
};

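/* Take the first free chunk out of the slab's bitmask and return its index,
 * or -1 if the slab is full. ffs() returns the 1-based position of the
 * lowest set bit, so a free chunk is found in a single scan over the words.
 */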
static int
mm_slab_alloc(struct mm_slab *slab)
{
   int i, n, b;

   if (slab->free == 0)
      return -1;

   for (i = 0; i < (slab->count + 31) / 32; ++i) {
      b = ffs(slab->bits[i]) - 1;
      if (b >= 0) {
         n = i * 32 + b;
         assert(n < slab->count);
         slab->free--;
         slab->bits[i] &= ~(1 << b);
         return n;
      }
   }
   return -1;
}

static INLINE void
mm_slab_free(struct mm_slab *slab, int i)
{
   assert(i < slab->count);
   slab->bits[i / 32] |= 1 << (i % 32);
   slab->free++;
   assert(slab->free <= slab->count);
}

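/* Round size up to the next power of two and return its log2, e.g.
 * mm_get_order(0x1000) == 12, mm_get_order(0x1001) == 13.
 * (__builtin_clz(x) ^ 31 is the index of the highest set bit for x != 0.)
 */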
static INLINE int
mm_get_order(uint32_t size)
{
   int s = __builtin_clz(size) ^ 31;

   if (size > (1 << s))
      s += 1;
   return s;
}

static struct mm_bucket *
mm_bucket_by_order(struct nouveau_mman *cache, int order)
{
   if (order > MM_MAX_ORDER)
      return NULL;
   return &cache->bucket[MAX2(order, MM_MIN_ORDER) - MM_MIN_ORDER];
}

static struct mm_bucket *
mm_bucket_by_size(struct nouveau_mman *cache, unsigned size)
{
   return mm_bucket_by_order(cache, mm_get_order(size));
}

/* size of bo allocation for slab with chunks of (1 << chunk_order) bytes */
static INLINE uint32_t
mm_default_slab_size(unsigned chunk_order)
{
   static const int8_t slab_order[MM_MAX_ORDER - MM_MIN_ORDER + 1] =
   {
      12, 12, 13, 14, 14, 17, 17, 17, 17, 19, 19, 20, 21, 22
   };

   assert(chunk_order <= MM_MAX_ORDER && chunk_order >= MM_MIN_ORDER);

   return 1 << slab_order[chunk_order - MM_MIN_ORDER];
}

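/* Create a new slab: allocate a bo of the default size for this chunk order,
 * mark all chunks free and put the slab on its bucket's free list. E.g. for
 * chunk_order 7 (128 byte chunks) this allocates a 1 << 12 = 4 KiB bo, i.e.
 * 32 chunks. Padding bits in the last bitmask word are also set by the
 * memset, but they are never handed out: any real free bit precedes them in
 * the scan, and mm_slab_alloc() stops once slab->free reaches zero.
 */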
static int
mm_slab_new(struct nouveau_mman *cache, int chunk_order)
{
   struct mm_slab *slab;
   int words, ret;
   const uint32_t size = mm_default_slab_size(chunk_order);

   words = ((size >> chunk_order) + 31) / 32;
   assert(words);

   slab = MALLOC(sizeof(struct mm_slab) + words * 4);
   if (!slab)
      return PIPE_ERROR_OUT_OF_MEMORY;

   memset(&slab->bits[0], ~0, words * 4);

   slab->bo = NULL;
   ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
                             0, cache->storage_type, &slab->bo);
   if (ret) {
      FREE(slab);
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   LIST_INITHEAD(&slab->head);

   slab->cache = cache;
   slab->order = chunk_order;
   slab->count = slab->free = size >> chunk_order;

   LIST_ADD(&slab->head, &mm_bucket_by_order(cache, chunk_order)->free);

   cache->allocated += size;

   if (nouveau_mesa_debug)
      debug_printf("MM: new slab, total memory = %"PRIu64" KiB\n",
                   cache->allocated / 1024);

   return PIPE_OK;
}

/* @return token to identify the slab, or NULL if the size is too large for
 * the cache (a dedicated bo is allocated instead) or allocation failed
 */
struct nouveau_mm_allocation *
nouveau_mm_allocate(struct nouveau_mman *cache,
                    uint32_t size, struct nouveau_bo **bo, uint32_t *offset)
{
   struct mm_bucket *bucket;
   struct mm_slab *slab;
   struct nouveau_mm_allocation *alloc;
   int ret;

   bucket = mm_bucket_by_size(cache, size);
   if (!bucket) {
      ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
                                0, cache->storage_type, bo);
      if (ret)
         debug_printf("bo_new(%x, %x): %i\n", size, cache->storage_type, ret);

      *offset = 0;
      return NULL;
   }

   if (!LIST_IS_EMPTY(&bucket->used)) {
      slab = LIST_ENTRY(struct mm_slab, bucket->used.next, head);
   } else {
      if (LIST_IS_EMPTY(&bucket->free)) {
         /* check the result: pulling a slab off an empty free list here
          * would dereference the list head as a slab
          */
         ret = mm_slab_new(cache, MAX2(mm_get_order(size), MM_MIN_ORDER));
         if (ret)
            return NULL;
      }
      slab = LIST_ENTRY(struct mm_slab, bucket->free.next, head);

      LIST_DEL(&slab->head);
      LIST_ADD(&slab->head, &bucket->used);
   }

   /* allocate the token before taking a chunk, so a failure here
    * cannot leak a chunk out of the slab
    */
   alloc = MALLOC_STRUCT(nouveau_mm_allocation);
   if (!alloc)
      return NULL;

   *offset = mm_slab_alloc(slab) << slab->order;

   nouveau_bo_ref(slab->bo, bo);

   if (slab->free == 0) {
      LIST_DEL(&slab->head);
      LIST_ADD(&slab->head, &bucket->full);
   }

   alloc->next = NULL;
   alloc->offset = *offset;
   alloc->priv = (void *)slab;

   return alloc;
}

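/* Minimal usage sketch (illustrative only; the cache instance and the size
 * used here are assumptions, not taken from this file):
 *
 *    struct nouveau_bo *bo = NULL;
 *    uint32_t offset;
 *    struct nouveau_mm_allocation *mm =
 *       nouveau_mm_allocate(cache, 4096, &bo, &offset);
 *    // ... use bo at byte offset 'offset' ...
 *    if (mm)
 *       nouveau_mm_free(mm);      // return the chunk to its slab
 *    nouveau_bo_ref(NULL, &bo);   // drop the reference taken for the caller
 */
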
void
nouveau_mm_free(struct nouveau_mm_allocation *alloc)
{
   struct mm_slab *slab = (struct mm_slab *)alloc->priv;
   struct mm_bucket *bucket = mm_bucket_by_order(slab->cache, slab->order);

   mm_slab_free(slab, alloc->offset >> slab->order);

   if (slab->free == slab->count) {
      LIST_DEL(&slab->head);
      LIST_ADDTAIL(&slab->head, &bucket->free);
   } else
   if (slab->free == 1) {
      LIST_DEL(&slab->head);
      LIST_ADDTAIL(&slab->head, &bucket->used);
   }

   FREE(alloc);
}

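/* void * adapter so a free can be queued as deferred work; presumably the
 * callers use this to release chunks only once the GPU is done with them
 * (an assumption about the callers; this file only provides the entry point).
 */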
void
nouveau_mm_free_work(void *data)
{
   nouveau_mm_free(data);
}

struct nouveau_mman *
nouveau_mm_create(struct nouveau_device *dev, uint32_t domain,
                  uint32_t storage_type)
{
   struct nouveau_mman *cache = MALLOC_STRUCT(nouveau_mman);
   int i;

   if (!cache)
      return NULL;

   cache->dev = dev;
   cache->domain = domain;
   cache->storage_type = storage_type;
   cache->allocated = 0;

   for (i = 0; i < MM_NUM_BUCKETS; ++i) {
      LIST_INITHEAD(&cache->bucket[i].free);
      LIST_INITHEAD(&cache->bucket[i].used);
      LIST_INITHEAD(&cache->bucket[i].full);
   }

   return cache;
}

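/* Free every slab on the given list. On destroy, used and full slabs are
 * torn down the same way as free ones, so any allocation still outstanding
 * at that point is left dangling; nouveau_mm_destroy() warns when that
 * happens.
 */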
static INLINE void
nouveau_mm_free_slabs(struct list_head *head)
{
   struct mm_slab *slab, *next;

   LIST_FOR_EACH_ENTRY_SAFE(slab, next, head, head) {
      LIST_DEL(&slab->head);
      nouveau_bo_ref(NULL, &slab->bo);
      FREE(slab);
   }
}

void
nouveau_mm_destroy(struct nouveau_mman *cache)
{
   int i;

   if (!cache)
      return;

   for (i = 0; i < MM_NUM_BUCKETS; ++i) {
      if (!LIST_IS_EMPTY(&cache->bucket[i].used) ||
          !LIST_IS_EMPTY(&cache->bucket[i].full))
         debug_printf("WARNING: destroying GPU memory cache "
                      "with some buffers still in use\n");

      nouveau_mm_free_slabs(&cache->bucket[i].free);
      nouveau_mm_free_slabs(&cache->bucket[i].used);
      nouveau_mm_free_slabs(&cache->bucket[i].full);
   }

   FREE(cache);
}