#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "nouveau_screen.h"
#include "nouveau_mm.h"

#include "nouveau/nouveau_bo.h"

#define MM_MIN_ORDER 7
#define MM_MAX_ORDER 20

#define MM_NUM_BUCKETS (MM_MAX_ORDER - MM_MIN_ORDER + 1)

#define MM_MIN_SIZE (1 << MM_MIN_ORDER)
#define MM_MAX_SIZE (1 << MM_MAX_ORDER)
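/* Chunk sizes thus range from 1 << 7 = 128 bytes to 1 << 20 = 1 MiB;
 * requests above MM_MAX_SIZE bypass the slab cache and get a dedicated
 * bo in nouveau_mm_allocate below.
 */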
/* A slab sits on exactly one of these lists, depending on how many of
 * its chunks are handed out: none (free), some (used), or all (full).
 */
struct mm_bucket {
   struct list_head free;
   struct list_head used;
   struct list_head full;
};
struct nouveau_mman {
   struct nouveau_device *dev;
   struct mm_bucket bucket[MM_NUM_BUCKETS];
   uint32_t storage_type;
   uint32_t domain;
   uint64_t allocated;
};
struct mm_slab {
   struct list_head head;
   struct nouveau_bo *bo;
   struct nouveau_mman *cache;
   int order;
   int count;
   int free;
   uint32_t bits[0]; /* one bit per chunk; a set bit marks a free chunk */
};
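/* Claim a chunk: scan the bitmap one 32-bit word at a time, clear the
 * first set bit found and return its chunk index, or -1 if the slab is
 * exhausted.
 */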
static int
mm_slab_alloc(struct mm_slab *slab)
{
   int i, n, b;

   if (slab->free == 0)
      return -1;

   for (i = 0; i < (slab->count + 31) / 32; ++i) {
      b = ffs(slab->bits[i]) - 1;
      if (b >= 0) {
         n = i * 32 + b;
         assert(n < slab->count);
         slab->free--;
         slab->bits[i] &= ~(1 << b);
         return n;
      }
   }
   return -1;
}
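/* Return chunk i to the slab by setting its bit in the free bitmap. */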
static INLINE void
mm_slab_free(struct mm_slab *slab, int i)
{
   assert(i < slab->count);
   slab->bits[i / 32] |= 1 << (i % 32);
   slab->free++;
   assert(slab->free <= slab->count);
}
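/* Smallest order such that size <= (1 << order); for 32-bit x,
 * __builtin_clz(x) ^ 31 is floor(log2(x)), so e.g. mm_get_order(0x1000)
 * is 12 and mm_get_order(0x1001) is 13.
 */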
static INLINE int
mm_get_order(uint32_t size)
{
   int s = __builtin_clz(size) ^ 31;

   if (size > (1 << s))
      s += 1;
   return s;
}
static struct mm_bucket *
mm_bucket_by_order(struct nouveau_mman *cache, int order)
{
   if (order > MM_MAX_ORDER)
      return NULL;
   return &cache->bucket[MAX2(order, MM_MIN_ORDER) - MM_MIN_ORDER];
}
static struct mm_bucket *
mm_bucket_by_size(struct nouveau_mman *cache, unsigned size)
{
   return mm_bucket_by_order(cache, mm_get_order(size));
}
/* size of bo allocation for slab with chunks of (1 << chunk_order) bytes */
static INLINE uint32_t
mm_default_slab_size(unsigned chunk_order)
{
   static const int8_t slab_order[MM_MAX_ORDER - MM_MIN_ORDER + 1] =
   {
      12, 12, 13, 14, 14, 17, 17, 17, 17, 19, 19, 20, 21, 22
   };

   assert(chunk_order <= MM_MAX_ORDER && chunk_order >= MM_MIN_ORDER);

   return 1 << slab_order[chunk_order - MM_MIN_ORDER];
}
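/* E.g. chunk_order 7 picks slab_order[0] = 12: a 4 KiB bo holding 32
 * chunks of 128 bytes each.
 */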
static int
mm_slab_new(struct nouveau_mman *cache, int chunk_order)
{
   struct mm_slab *slab;
   int words, ret;
   const uint32_t size = mm_default_slab_size(chunk_order);

   words = ((size >> chunk_order) + 31) / 32;
   assert(words);

   slab = MALLOC(sizeof(struct mm_slab) + words * 4);
   if (!slab)
      return PIPE_ERROR_OUT_OF_MEMORY;

   memset(&slab->bits[0], ~0, words * 4);

   slab->bo = NULL;
   ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
                             0, cache->storage_type, &slab->bo);
   if (ret) {
      FREE(slab);
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   LIST_INITHEAD(&slab->head);

   slab->cache = cache;
   slab->order = chunk_order;
   slab->count = slab->free = size >> chunk_order;

   LIST_ADD(&slab->head, &mm_bucket_by_order(cache, chunk_order)->free);

   cache->allocated += size;

   if (nouveau_mesa_debug)
      debug_printf("MM: new slab, total memory = %"PRIu64" KiB\n",
                   cache->allocated / 1024);

   return PIPE_OK;
}
/* @return token to identify slab or NULL if we just allocated a new bo */
struct nouveau_mm_allocation *
nouveau_mm_allocate(struct nouveau_mman *cache,
                    uint32_t size, struct nouveau_bo **bo, uint32_t *offset)
{
   struct mm_bucket *bucket;
   struct mm_slab *slab;
   struct nouveau_mm_allocation *alloc;
   int ret;

   bucket = mm_bucket_by_size(cache, size);
   if (!bucket) {
      ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
                                0, cache->storage_type, bo);
      if (ret)
         debug_printf("bo_new(%x, %x): %i\n", size, cache->storage_type, ret);

      *offset = 0;
      return NULL;
   }

   if (!LIST_IS_EMPTY(&bucket->used)) {
      slab = LIST_ENTRY(struct mm_slab, bucket->used.next, head);
   } else {
      if (LIST_IS_EMPTY(&bucket->free)) {
         mm_slab_new(cache, MAX2(mm_get_order(size), MM_MIN_ORDER));
      }
      slab = LIST_ENTRY(struct mm_slab, bucket->free.next, head);

      LIST_DEL(&slab->head);
      LIST_ADD(&slab->head, &bucket->used);
   }

   *offset = mm_slab_alloc(slab) << slab->order;

   alloc = MALLOC_STRUCT(nouveau_mm_allocation);
   if (!alloc)
      return NULL;

   nouveau_bo_ref(slab->bo, bo);

   if (slab->free == 0) {
      LIST_DEL(&slab->head);
      LIST_ADD(&slab->head, &bucket->full);
   }

   alloc->next = NULL;
   alloc->offset = *offset;
   alloc->priv = (void *)slab;

   return alloc;
}
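/* Illustrative call sequence (a sketch, not code from this file; the
 * names `mm` and `size` are placeholders):
 *
 *    struct nouveau_bo *bo = NULL;
 *    uint32_t offset;
 *    struct nouveau_mm_allocation *a =
 *       nouveau_mm_allocate(mm, size, &bo, &offset);
 *    ...use bo at byte offset `offset`...
 *    nouveau_bo_ref(NULL, &bo);  (drop the reference taken for the caller)
 *    if (a)
 *       nouveau_mm_free(a);      (NULL means a dedicated bo was returned)
 */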
void
nouveau_mm_free(struct nouveau_mm_allocation *alloc)
{
   struct mm_slab *slab = (struct mm_slab *)alloc->priv;
   struct mm_bucket *bucket = mm_bucket_by_order(slab->cache, slab->order);

   mm_slab_free(slab, alloc->offset >> slab->order);

   if (slab->free == slab->count) {
      LIST_DEL(&slab->head);
      LIST_ADDTAIL(&slab->head, &bucket->free);
   } else
   if (slab->free == 1) {
      LIST_DEL(&slab->head);
      LIST_ADDTAIL(&slab->head, &bucket->used);
   }

   FREE(alloc);
}
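/* Trivial adapter so nouveau_mm_free matches a void (*)(void *)
 * callback, letting the free be deferred (e.g. run once a fence
 * signals that the GPU is done with the chunk).
 */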
void
nouveau_mm_free_work(void *data)
{
   nouveau_mm_free(data);
}
struct nouveau_mman *
nouveau_mm_create(struct nouveau_device *dev, uint32_t domain,
                  uint32_t storage_type)
{
   struct nouveau_mman *cache = MALLOC_STRUCT(nouveau_mman);
   int i;

   if (!cache)
      return NULL;

   cache->dev = dev;
   cache->domain = domain;
   cache->storage_type = storage_type;
   cache->allocated = 0;

   for (i = 0; i < MM_NUM_BUCKETS; ++i) {
      LIST_INITHEAD(&cache->bucket[i].free);
      LIST_INITHEAD(&cache->bucket[i].used);
      LIST_INITHEAD(&cache->bucket[i].full);
   }

   return cache;
}
static INLINE void
nouveau_mm_free_slabs(struct list_head *head)
{
   struct mm_slab *slab, *next;

   LIST_FOR_EACH_ENTRY_SAFE(slab, next, head, head) {
      LIST_DEL(&slab->head);
      nouveau_bo_ref(NULL, &slab->bo);
      FREE(slab);
   }
}
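/* Release every slab in the cache; slabs still on used/full lists draw
 * the warning below but are freed regardless.
 */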
void
nouveau_mm_destroy(struct nouveau_mman *cache)
{
   int i;

   if (!cache)
      return;

   for (i = 0; i < MM_NUM_BUCKETS; ++i) {
      if (!LIST_IS_EMPTY(&cache->bucket[i].used) ||
          !LIST_IS_EMPTY(&cache->bucket[i].full))
         debug_printf("WARNING: destroying GPU memory cache "
                      "with some buffers still in use\n");

      nouveau_mm_free_slabs(&cache->bucket[i].free);
      nouveau_mm_free_slabs(&cache->bucket[i].used);
      nouveau_mm_free_slabs(&cache->bucket[i].full);
   }

   FREE(cache);
}