2 #include "util/u_inlines.h"
3 #include "util/u_memory.h"
4 #include "util/u_double_list.h"
6 #include "nvc0_screen.h"
9 #define MM_MAX_ORDER 20
11 #define MM_NUM_BUCKETS (MM_MAX_ORDER - MM_MIN_ORDER + 1)
13 #define MM_MIN_SIZE (1 << MM_MIN_ORDER)
14 #define MM_MAX_SIZE (1 << MM_MAX_ORDER)
struct mm_bucket {
   struct list_head free;
   struct list_head used;
   struct list_head full;
   int num_free;
};

struct nvc0_mman {
   struct nouveau_device *dev;
   struct mm_bucket bucket[MM_NUM_BUCKETS];
   uint32_t storage_type;
   uint32_t domain;
   uint64_t allocated;
};

struct mm_slab {
   struct list_head head;
   struct nouveau_bo *bo;
   struct nvc0_mman *cache;
   int order;
   int count;
   int free;
   uint32_t bits[0];
};
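/* How the structures above fit together: an nvc0_mman keeps one bucket per
 * chunk-size order, and each bucket holds three lists of slabs sorted by
 * occupancy (entirely free, partially used, full). A slab is a single
 * nouveau_bo carved into equally sized chunks of (1 << order) bytes; a set
 * bit in bits[] marks a chunk that is still available.
 */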
static int
mm_slab_alloc(struct mm_slab *slab)
{
   int i, n, b;

   if (slab->free == 0)
      return -1;

   /* scan the occupancy bitmap one 32-bit word at a time;
    * a set bit marks a free chunk
    */
   for (i = 0; i < (slab->count + 31) / 32; ++i) {
      b = ffs(slab->bits[i]) - 1;
      if (b >= 0) {
         n = i * 32 + b;
         assert(n < slab->count);
         slab->free--;
         slab->bits[i] &= ~(1 << b);
         return n;
      }
   }
   return -1;
}
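/* Worked example: with bits[0] == 0x00000005 (chunks 0 and 2 free), ffs()
 * returns 1, so b == 0 and chunk 0 is handed out, leaving bits[0] ==
 * 0x00000004. A fully occupied word is 0, for which ffs() returns 0 and
 * b == -1, so the loop skips it.
 */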
static INLINE void
mm_slab_free(struct mm_slab *slab, int i)
{
   assert(i < slab->count);
   slab->bits[i / 32] |= 1 << (i % 32);
   slab->free++;
   assert(slab->free <= slab->count);
}
static INLINE int
mm_get_order(uint32_t size)
{
   /* log2 of size, rounded up */
   int s = __builtin_clz(size) ^ 31;

   if (size > (1 << s))
      s += 1;
   return s;
}
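/* Examples: mm_get_order(4096) == 12, mm_get_order(4097) == 13,
 * mm_get_order(100) == 7.
 */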
static struct mm_bucket *
mm_bucket_by_order(struct nvc0_mman *cache, int order)
{
   if (order > MM_MAX_ORDER)
      return NULL;
   return &cache->bucket[MAX2(order, MM_MIN_ORDER) - MM_MIN_ORDER];
}

static struct mm_bucket *
mm_bucket_by_size(struct nvc0_mman *cache, unsigned size)
{
   return mm_bucket_by_order(cache, mm_get_order(size));
}
/* size of bo allocation for slab with chunks of (1 << chunk_order) bytes */
static INLINE uint32_t
mm_default_slab_size(unsigned chunk_order)
{
   static const int8_t slab_order[MM_MAX_ORDER - MM_MIN_ORDER + 1] =
   {
      12, 12, 13, 14, 14, 17, 17, 17, 17, 19, 19, 20, 21, 22
   };

   assert(chunk_order <= MM_MAX_ORDER && chunk_order >= MM_MIN_ORDER);

   return 1 << slab_order[chunk_order - MM_MIN_ORDER];
}
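/* Example: for chunk_order 7 (128-byte chunks) slab_order[0] is 12, so a
 * 1 << 12 = 4 KiB bo is allocated, holding 32 chunks tracked by a single
 * 32-bit word in bits[].
 */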
static int
mm_slab_new(struct nvc0_mman *cache, int chunk_order)
{
   struct mm_slab *slab;
   int words, ret;
   const uint32_t size = mm_default_slab_size(chunk_order);

   words = ((size >> chunk_order) + 31) / 32;
   assert(words);

   slab = MALLOC(sizeof(struct mm_slab) + words * 4);
   if (!slab)
      return PIPE_ERROR_OUT_OF_MEMORY;

   /* all bits set: every chunk starts out free */
   memset(&slab->bits[0], ~0, words * 4);

   ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
                             0, cache->storage_type, &slab->bo);
   if (ret) {
      FREE(slab);
      return PIPE_ERROR_OUT_OF_MEMORY;
   }

   LIST_INITHEAD(&slab->head);

   slab->cache = cache;
   slab->order = chunk_order;
   slab->count = slab->free = size >> chunk_order;

   LIST_ADD(&slab->head, &mm_bucket_by_order(cache, chunk_order)->free);

   cache->allocated += size;

   debug_printf("MM: new slab, total memory = %lu KiB\n",
                (unsigned long)(cache->allocated / 1024));

   return PIPE_OK;
}
/* @return token to identify slab or NULL if we just allocated a new bo */
struct nvc0_mm_allocation *
nvc0_mm_allocate(struct nvc0_mman *cache,
                 uint32_t size, struct nouveau_bo **bo, uint32_t *offset)
{
   struct mm_bucket *bucket;
   struct mm_slab *slab;
   struct nvc0_mm_allocation *alloc;
   int ret;

   bucket = mm_bucket_by_size(cache, size);
   if (!bucket) {
      /* too large for any bucket: give the caller a dedicated bo */
      ret = nouveau_bo_new_tile(cache->dev, cache->domain, 0, size,
                                0, cache->storage_type, bo);
      if (ret)
         debug_printf("bo_new(%x, %x): %i\n", size, cache->storage_type, ret);

      *offset = 0;
      return NULL;
   }

   if (!LIST_IS_EMPTY(&bucket->used)) {
      slab = LIST_ENTRY(struct mm_slab, bucket->used.next, head);
   } else {
      if (LIST_IS_EMPTY(&bucket->free)) {
         mm_slab_new(cache, MAX2(mm_get_order(size), MM_MIN_ORDER));
      }
      slab = LIST_ENTRY(struct mm_slab, bucket->free.next, head);

      LIST_DEL(&slab->head);
      LIST_ADD(&slab->head, &bucket->used);
   }

   *offset = mm_slab_alloc(slab) << slab->order;

   alloc = MALLOC_STRUCT(nvc0_mm_allocation);
   if (!alloc)
      return NULL;

   nouveau_bo_ref(slab->bo, bo);

   if (slab->free == 0) {
      LIST_DEL(&slab->head);
      LIST_ADD(&slab->head, &bucket->full);
   }

   alloc->next = NULL;
   alloc->offset = *offset;
   alloc->priv = (void *)slab;

   return alloc;
}
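/* Return contract by example: asking for 4 KiB yields a non-NULL token plus
 * the slab's bo and a byte offset into it; asking for more than MM_MAX_SIZE
 * (1 MiB) bypasses the slabs and returns NULL with a dedicated bo at
 * offset 0.
 */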
void
nvc0_mm_free(struct nvc0_mm_allocation *alloc)
{
   struct mm_slab *slab = (struct mm_slab *)alloc->priv;
   struct mm_bucket *bucket = mm_bucket_by_order(slab->cache, slab->order);

   mm_slab_free(slab, alloc->offset >> slab->order);

   /* the slab was full before this free: move it off the full list */
   if (slab->free == 1) {
      LIST_DEL(&slab->head);

      if (slab->free != slab->count)
         LIST_ADDTAIL(&slab->head, &bucket->used);
      else
         LIST_ADDTAIL(&slab->head, &bucket->free);
   }

   FREE(alloc);
}
struct nvc0_mman *
nvc0_mm_create(struct nouveau_device *dev, uint32_t domain,
               uint32_t storage_type)
{
   struct nvc0_mman *cache = MALLOC_STRUCT(nvc0_mman);
   int i;

   if (!cache)
      return NULL;

   cache->dev = dev;
   cache->domain = domain;
   cache->storage_type = storage_type;
   cache->allocated = 0;

   for (i = 0; i < MM_NUM_BUCKETS; ++i) {
      LIST_INITHEAD(&cache->bucket[i].free);
      LIST_INITHEAD(&cache->bucket[i].used);
      LIST_INITHEAD(&cache->bucket[i].full);
   }

   return cache;
}
static INLINE void
nvc0_mm_free_slabs(struct list_head *head)
{
   struct mm_slab *slab, *next;

   LIST_FOR_EACH_ENTRY_SAFE(slab, next, head, head) {
      LIST_DEL(&slab->head);
      nouveau_bo_ref(NULL, &slab->bo);
      FREE(slab);
   }
}
void
nvc0_mm_destroy(struct nvc0_mman *cache)
{
   int i;

   for (i = 0; i < MM_NUM_BUCKETS; ++i) {
      if (!LIST_IS_EMPTY(&cache->bucket[i].used) ||
          !LIST_IS_EMPTY(&cache->bucket[i].full))
         debug_printf("WARNING: destroying GPU memory cache "
                      "with some buffers still in use\n");

      nvc0_mm_free_slabs(&cache->bucket[i].free);
      nvc0_mm_free_slabs(&cache->bucket[i].used);
      nvc0_mm_free_slabs(&cache->bucket[i].full);
   }

   FREE(cache);
}
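/* Illustrative usage sketch (not part of the original file): how a caller
 * might drive this allocator. The domain flag NOUVEAU_BO_VRAM comes from
 * libdrm's nouveau API; the storage type 0 and the 4 KiB size are
 * assumptions chosen for the example only.
 */
#if 0
static void
example_suballocation(struct nouveau_device *dev)
{
   struct nvc0_mman *mm = nvc0_mm_create(dev, NOUVEAU_BO_VRAM, 0);
   struct nouveau_bo *bo = NULL;
   uint32_t offset;
   struct nvc0_mm_allocation *token;

   /* suballocate 4 KiB; bo + offset locate the storage inside a slab */
   token = nvc0_mm_allocate(mm, 0x1000, &bo, &offset);

   /* ... use bo at byte offset 'offset' ... */

   if (token)
      nvc0_mm_free(token);    /* return the chunk to its slab */
   nouveau_bo_ref(NULL, &bo); /* drop our reference on the bo */

   nvc0_mm_destroy(mm);
}
#endif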