+/* Try to satisfy an allocation by reusing an idle BO from a cache bucket,
+ * instead of asking the kernel for a fresh buffer.
+ *
+ * \param bucket     Cache bucket for the requested size; may be NULL (no
+ *                   bucket for this size), in which case we return NULL.
+ * \param alignment  Required address alignment.  A reused BO keeps its old
+ *                   address only if that address is a multiple of this.
+ * \param memzone    Desired memory zone for the BO's virtual address.
+ * \param flags      Only BO_ALLOC_ZEROED is examined here.
+ * \param match_zone When true, skip cached BOs whose current address lies
+ *                   outside \p memzone (a "try harder" first pass); when
+ *                   false, any cached BO is acceptable.
+ *
+ * Returns NULL when no idle, still-resident BO is found (or when zeroing a
+ * reused BO fails); the caller is expected to fall back to another pass or
+ * to a fresh kernel allocation.
+ */
+static struct iris_bo *
+alloc_bo_from_cache(struct iris_bufmgr *bufmgr,
+ struct bo_cache_bucket *bucket,
+ uint32_t alignment,
+ enum iris_memory_zone memzone,
+ unsigned flags,
+ bool match_zone)
+{
+ if (!bucket)
+ return NULL;
+
+ struct iris_bo *bo = NULL;
+
+ list_for_each_entry_safe(struct iris_bo, cur, &bucket->head, head) {
+ /* Try a little harder to find one that's already in the right memzone */
+ if (match_zone && memzone != iris_memzone_for_address(cur->gtt_offset))
+ continue;
+
+ /* If the last BO in the cache is busy, there are no idle BOs. Bail,
+ * either falling back to a non-matching memzone, or if that fails,
+ * allocating a fresh buffer.
+ */
+ if (iris_bo_busy(cur))
+ return NULL;
+
+ /* Take the BO out of the cache; from here on we own it. */
+ list_del(&cur->head);
+
+ /* Tell the kernel we need this BO. If it still exists, we're done! */
+ if (iris_bo_madvise(cur, I915_MADV_WILLNEED)) {
+ bo = cur;
+ break;
+ }
+
+ /* This BO was purged, throw it out and keep looking. */
+ bo_free(cur);
+ }
+
+ if (!bo)
+ return NULL;
+
+ /* If the cached BO isn't in the right memory zone, or the alignment
+ * isn't sufficient, free the old memory and assign it a new address.
+ * NOTE(review): gtt_offset == 0 presumably signals the caller to bind a
+ * fresh VMA in the right zone — confirm against the calling allocator.
+ */
+ if (memzone != iris_memzone_for_address(bo->gtt_offset) ||
+ bo->gtt_offset % alignment != 0) {
+ vma_free(bufmgr, bo->gtt_offset, bo->size);
+ bo->gtt_offset = 0ull;
+ }
+
+ /* Zero the contents if necessary. If this fails, fall back to
+ * allocating a fresh BO, which will always be zeroed by the kernel.
+ */
+ if (flags & BO_ALLOC_ZEROED) {
+ void *map = iris_bo_map(NULL, bo, MAP_WRITE | MAP_RAW);
+ if (map) {
+ memset(map, 0, bo->size);
+ } else {
+ bo_free(bo);
+ return NULL;
+ }
+ }
+
+ return bo;
+}
+