unallocated += align(item->size_in_dw, ITEM_ALIGNMENT);
}
- /* If we require more space than the size of the pool, then grow the
- * pool.
- *
- * XXX: I'm pretty sure this won't work. Imagine this scenario:
- *
- * Offset Item Size
- * 0 A 50
- * 200 B 50
- * 400 C 50
- *
- * Total size = 450
- * Allocated size = 150
- * Pending Item D Size = 200
- *
- * In this case, there are 300 units of free space in the pool, but
- * they aren't contiguous, so it will be impossible to allocate Item D.
- */
+ if (pool->status & POOL_FRAGMENTED) {
+ compute_memory_defrag(pool, pipe);
+ }
+
if (pool->size_in_dw < allocated + unallocated) {
err = compute_memory_grow_pool(pool, pipe, allocated + unallocated);
if (err == -1)
last_pos += align(item->size_in_dw, ITEM_ALIGNMENT);
}
+
+ pool->status &= ~POOL_FRAGMENTED;
}
int compute_memory_promote_item(struct compute_memory_pool *pool,
/* Remember to mark the buffer as 'pending' by setting start_in_dw to -1 */
item->start_in_dw = -1;
+
+ if (item->link.next != pool->item_list) {
+ pool->status |= POOL_FRAGMENTED;
+ }
}
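The check added here (and again in the compute_memory_free hunk below) relies on item_list being a circular list whose head is pool->item_list, as used by LIST_FOR_EACH_ENTRY_SAFE: an item is the last allocated chunk exactly when its link.next wraps back to the head. Demoting or freeing that last chunk leaves no hole behind it, so only non-tail removals need to flag the pool. A sketch of the predicate, spelled out only to make the reasoning explicit:

/* True when removing 'item' would leave a hole behind it, i.e. when
 * 'item' is not the tail of the circular item_list. */
static bool removal_fragments_pool(struct compute_memory_pool *pool,
                                   struct compute_memory_item *item)
{
        return item->link.next != pool->item_list;
}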
/**
LIST_FOR_EACH_ENTRY_SAFE(item, next, pool->item_list, link) {
if (item->id == id) {
+
+ if (item->link.next != pool->item_list) {
+ pool->status |= POOL_FRAGMENTED;
+ }
+
list_del(&item->link);
if (item->real_buffer) {
#define ITEM_FOR_PROMOTING (1<<2)
#define ITEM_FOR_DEMOTING (1<<3)
+#define POOL_FRAGMENTED (1<<0)
+
struct compute_memory_pool;
struct compute_memory_item
uint32_t *shadow; ///host copy of the pool, used for defragmentation
+ uint32_t status; /**< Status of the pool */
+
struct list_head *item_list; ///Allocated memory chunks in the buffer, they must be ordered by "start_in_dw"
struct list_head *unallocated_list; ///Unallocated memory chunks
};
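Note that POOL_FRAGMENTED restarts at (1<<0): it belongs to the new pool->status word, a separate namespace from the ITEM_* bits that apply to individual items, so the numeric overlap with any item flag is harmless. For illustration only, the life cycle of the bit mirrors the hunks above:

pool->status |= POOL_FRAGMENTED;        /* a hole was just created          */
if (pool->status & POOL_FRAGMENTED)     /* compact before growing the pool  */
        compute_memory_defrag(pool, pipe);
pool->status &= ~POOL_FRAGMENTED;       /* the pool is contiguous again     */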