#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "nouveau_winsys.h"
#include "nouveau_screen.h"
#include "nouveau_mm.h"
#define MM_MIN_ORDER 7
#define MM_MAX_ORDER 20

#define MM_NUM_BUCKETS (MM_MAX_ORDER - MM_MIN_ORDER + 1)

#define MM_MIN_SIZE (1 << MM_MIN_ORDER)
#define MM_MAX_SIZE (1 << MM_MAX_ORDER)
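
/* Slab suballocator for GPU buffer objects: chunk sizes are powers of two
 * between MM_MIN_SIZE (128 bytes) and MM_MAX_SIZE (1 MiB), with one bucket
 * per order. Each bucket keeps its slabs on three lists by occupancy:
 * entirely free, partially used, and full.
 */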
struct mm_bucket {
   struct list_head free;
   struct list_head used;
   struct list_head full;
};
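
/* Allocator state: the device, the memory domain and BO config used for
 * slab BOs, one bucket per chunk order, and a running total of memory
 * allocated for slabs.
 */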
struct nouveau_mman {
   struct nouveau_device *dev;
   struct mm_bucket bucket[MM_NUM_BUCKETS];
   uint32_t domain;
   union nouveau_bo_config config;
   uint64_t allocated;
};
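
/* A slab: one BO carved into equally sized chunks of (1 << order) bytes,
 * with a bitmap of chunk occupancy (a set bit means the chunk is free).
 */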
struct mm_slab {
   struct list_head head;
   struct nouveau_bo *bo;
   struct nouveau_mman *cache;
   int order;
   int count;
   int free;
   uint32_t bits[0];
};
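
/* Grab the first free chunk: scan the bitmap one 32-bit word at a time,
 * using ffs() to find a set (free) bit. Returns the chunk index, or -1 if
 * the slab is full.
 */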
static int
mm_slab_alloc(struct mm_slab *slab)
{
   int i, n, b;

   if (slab->free == 0)
      return -1;

   for (i = 0; i < (slab->count + 31) / 32; ++i) {
      b = ffs(slab->bits[i]) - 1;
      if (b >= 0) {
         n = i * 32 + b;
         assert(n < slab->count);
         slab->free--;
         slab->bits[i] &= ~(1 << b);
         return n;
      }
   }
   return -1;
}
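
/* Return chunk i to the slab by setting its bit in the bitmap again. */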
static INLINE void
mm_slab_free(struct mm_slab *slab, int i)
{
   assert(i < slab->count);
   slab->bits[i / 32] |= 1 << (i % 32);
   slab->free++;
   assert(slab->free <= slab->count);
}
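
/* Smallest order that fits size: floor(log2(size)) via clz, rounded up
 * by one when size is not a power of two.
 */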
static INLINE int
mm_get_order(uint32_t size)
{
   int s = __builtin_clz(size) ^ 31;

   if (size > (1 << s))
      s += 1;
   return s;
}
static struct mm_bucket *
mm_bucket_by_order(struct nouveau_mman *cache, int order)
{
   if (order > MM_MAX_ORDER)
      return NULL;
   return &cache->bucket[MAX2(order, MM_MIN_ORDER) - MM_MIN_ORDER];
}
static struct mm_bucket *
mm_bucket_by_size(struct nouveau_mman *cache, unsigned size)
{
   return mm_bucket_by_order(cache, mm_get_order(size));
}
/* size of bo allocation for slab with chunks of (1 << chunk_order) bytes */
static INLINE uint32_t
mm_default_slab_size(unsigned chunk_order)
{
   static const int8_t slab_order[MM_MAX_ORDER - MM_MIN_ORDER + 1] =
   {
      12, 12, 13, 14, 14, 17, 17, 17, 17, 19, 19, 20, 21, 22
   };

   assert(chunk_order <= MM_MAX_ORDER && chunk_order >= MM_MIN_ORDER);

   return 1 << slab_order[chunk_order - MM_MIN_ORDER];
}
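
/* Create a new slab for chunks of (1 << chunk_order) bytes: allocate the
 * backing BO, mark every chunk free in the bitmap, and put the slab on
 * its bucket's free list.
 */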
static int
mm_slab_new(struct nouveau_mman *cache, int chunk_order)
{
   struct mm_slab *slab;
   int words, ret;
   const uint32_t size = mm_default_slab_size(chunk_order);

   words = ((size >> chunk_order) + 31) / 32;
   assert(words);

   slab = MALLOC(sizeof(struct mm_slab) + words * 4);
   if (!slab)
      return PIPE_ERROR_OUT_OF_MEMORY;

   memset(&slab->bits[0], ~0, words * 4);

   slab->bo = NULL;

   ret = nouveau_bo_new(cache->dev, cache->domain, 0, size, &cache->config,
                        &slab->bo);
   if (ret) {
      FREE(slab);
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   LIST_INITHEAD(&slab->head);

   slab->cache = cache;
   slab->order = chunk_order;
   slab->count = slab->free = size >> chunk_order;

   LIST_ADD(&slab->head, &mm_bucket_by_order(cache, chunk_order)->free);

   cache->allocated += size;

   if (nouveau_mesa_debug)
      debug_printf("MM: new slab, total memory = %"PRIu64" KiB\n",
                   cache->allocated / 1024);

   return PIPE_OK;
}
/* @return token to identify slab or NULL if we just allocated a new bo */
struct nouveau_mm_allocation *
nouveau_mm_allocate(struct nouveau_mman *cache,
                    uint32_t size, struct nouveau_bo **bo, uint32_t *offset)
{
   struct mm_bucket *bucket;
   struct mm_slab *slab;
   struct nouveau_mm_allocation *alloc;
   int ret;

   bucket = mm_bucket_by_size(cache, size);
   if (!bucket) {
      ret = nouveau_bo_new(cache->dev, cache->domain, 0, size, &cache->config,
                           bo);
      if (ret)
         debug_printf("bo_new(%x, %x): %i\n",
                      size, cache->config.nv50.memtype, ret);

      *offset = 0;
      return NULL;
   }

   if (!LIST_IS_EMPTY(&bucket->used)) {
      slab = LIST_ENTRY(struct mm_slab, bucket->used.next, head);
   } else {
      if (LIST_IS_EMPTY(&bucket->free)) {
         mm_slab_new(cache, MAX2(mm_get_order(size), MM_MIN_ORDER));
      }
      slab = LIST_ENTRY(struct mm_slab, bucket->free.next, head);

      LIST_DEL(&slab->head);
      LIST_ADD(&slab->head, &bucket->used);
   }

   *offset = mm_slab_alloc(slab) << slab->order;

   alloc = MALLOC_STRUCT(nouveau_mm_allocation);
   if (!alloc)
      return NULL;

   nouveau_bo_ref(slab->bo, bo);

   if (slab->free == 0) {
      LIST_DEL(&slab->head);
      LIST_ADD(&slab->head, &bucket->full);
   }

   alloc->offset = *offset;
   alloc->priv = (void *)slab;

   return alloc;
}
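
/* Release a suballocation and move its slab back to the free or used
 * list as its occupancy changes.
 */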
void
nouveau_mm_free(struct nouveau_mm_allocation *alloc)
{
   struct mm_slab *slab = (struct mm_slab *)alloc->priv;
   struct mm_bucket *bucket = mm_bucket_by_order(slab->cache, slab->order);

   mm_slab_free(slab, alloc->offset >> slab->order);

   if (slab->free == slab->count) {
      LIST_DEL(&slab->head);
      LIST_ADDTAIL(&slab->head, &bucket->free);
   } else
   if (slab->free == 1) {
      LIST_DEL(&slab->head);
      LIST_ADDTAIL(&slab->head, &bucket->used);
   }

   FREE(alloc);
}
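
/* Thin wrapper with a work-callback signature so frees can be deferred,
 * e.g. scheduled to run once a fence signals; data is the allocation.
 */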
void
nouveau_mm_free_work(void *data)
{
   nouveau_mm_free(data);
}
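
/* Create an allocator for BOs in the given domain with the given config. */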
struct nouveau_mman *
nouveau_mm_create(struct nouveau_device *dev, uint32_t domain,
                  union nouveau_bo_config *config)
{
   struct nouveau_mman *cache = MALLOC_STRUCT(nouveau_mman);
   int i;

   if (!cache)
      return NULL;

   cache->dev = dev;
   cache->domain = domain;
   cache->config = *config;
   cache->allocated = 0;

   for (i = 0; i < MM_NUM_BUCKETS; ++i) {
      LIST_INITHEAD(&cache->bucket[i].free);
      LIST_INITHEAD(&cache->bucket[i].used);
      LIST_INITHEAD(&cache->bucket[i].full);
   }

   return cache;
}
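
/* Unref the backing BOs and free every slab on the given list. */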
static INLINE void
nouveau_mm_free_slabs(struct list_head *head)
{
   struct mm_slab *slab, *next;

   LIST_FOR_EACH_ENTRY_SAFE(slab, next, head, head) {
      LIST_DEL(&slab->head);
      nouveau_bo_ref(NULL, &slab->bo);
      FREE(slab);
   }
}
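
/* Tear down the cache; warns if any suballocations are still in use. */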
void
nouveau_mm_destroy(struct nouveau_mman *cache)
{
   int i;

   if (!cache)
      return;

   for (i = 0; i < MM_NUM_BUCKETS; ++i) {
      if (!LIST_IS_EMPTY(&cache->bucket[i].used) ||
          !LIST_IS_EMPTY(&cache->bucket[i].full))
         debug_printf("WARNING: destroying GPU memory cache "
                      "with some buffers still in use\n");

      nouveau_mm_free_slabs(&cache->bucket[i].free);
      nouveau_mm_free_slabs(&cache->bucket[i].used);
      nouveau_mm_free_slabs(&cache->bucket[i].full);
   }

   FREE(cache);
}