+ if (pool->fd >= 0)
+ close(pool->fd);
+}
+
+static VkResult
+anv_block_pool_expand_range(struct anv_block_pool *pool,
+ uint32_t center_bo_offset, uint32_t size)
+{
+ /* Assert that we only ever grow the pool */
+ assert(center_bo_offset >= pool->back_state.end);
+ assert(size - center_bo_offset >= pool->state.end);
+
+ /* Assert that we don't go outside the bounds of the memfd */
+ assert(center_bo_offset <= BLOCK_POOL_MEMFD_CENTER);
+ assert(pool->use_softpin ||
+ size - center_bo_offset <=
+ BLOCK_POOL_MEMFD_SIZE - BLOCK_POOL_MEMFD_CENTER);
+
+ /* For state pool BOs we have to be a bit careful about where we place them
+ * in the GTT. There are two documented workarounds for state base address
+ * placement: Wa32bitGeneralStateOffset and Wa32bitInstructionBaseOffset
+ * which state that those two base addresses do not support 48-bit
+ * addresses and need to be placed in the bottom 32-bit range.
+ * Unfortunately, this is not quite accurate.
+ *
+ * The real problem is that we always set the size of our state pools in
+ * STATE_BASE_ADDRESS to 0xfffff (the maximum) even though the BO is most
+ * likely significantly smaller. We do this because we do not know at the
+ * time we emit STATE_BASE_ADDRESS whether or not we will need to expand
+ * the pool during command buffer building so we don't actually have a
+ * valid final size. If the address + size, as seen by STATE_BASE_ADDRESS
+ * overflows 48 bits, the GPU appears to treat all accesses to the buffer
+ * as being out of bounds and returns zero. For dynamic state, this
+ * usually just leads to rendering corruptions, but shaders that are all
+ * zero hang the GPU immediately.
+ *
+ * The easiest solution to do is exactly what the bogus workarounds say to
+ * do: restrict these buffers to 32-bit addresses. We could also pin the
+ * BO to some particular location of our choosing, but that's significantly
+ * more work than just not setting a flag. So, we explicitly DO NOT set
+ * the EXEC_OBJECT_SUPPORTS_48B_ADDRESS flag and the kernel does all of the
+ * hard work for us. When using softpin, we're in control and the fixed
+ * addresses we choose are fine for base addresses.
+ */
+ enum anv_bo_alloc_flags bo_alloc_flags = ANV_BO_ALLOC_CAPTURE;
+ if (!pool->use_softpin)
+ bo_alloc_flags |= ANV_BO_ALLOC_32BIT_ADDRESS;
+
+ if (pool->use_softpin) {
+ uint32_t new_bo_size = size - pool->size;
+ struct anv_bo *new_bo;
+ assert(center_bo_offset == 0);
+ VkResult result = anv_device_alloc_bo(pool->device, new_bo_size,
+ bo_alloc_flags |
+ ANV_BO_ALLOC_FIXED_ADDRESS |
+ ANV_BO_ALLOC_MAPPED |
+ ANV_BO_ALLOC_SNOOPED,
+ pool->start_address + pool->size,
+ &new_bo);
+ if (result != VK_SUCCESS)
+ return result;
+
+ pool->bos[pool->nbos++] = new_bo;
+
+ /* This pointer will always point to the first BO in the list */
+ pool->bo = pool->bos[0];
+ } else {
+ /* Just leak the old map until we destroy the pool. We can't munmap it
+ * without races or imposing locking on the block allocate fast path. On
+ * the whole the leaked maps add up to less than the size of the
+ * current map. MAP_POPULATE seems like the right thing to do, but we
+ * should try to get some numbers.
+ */
+ void *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
+ MAP_SHARED | MAP_POPULATE, pool->fd,
+ BLOCK_POOL_MEMFD_CENTER - center_bo_offset);
+ if (map == MAP_FAILED)
+ return vk_errorf(pool->device, pool->device,
+ VK_ERROR_MEMORY_MAP_FAILED, "mmap failed: %m");
+
+ struct anv_bo *new_bo;
+ VkResult result = anv_device_import_bo_from_host_ptr(pool->device,
+ map, size,
+ bo_alloc_flags,
+ 0 /* client_address */,
+ &new_bo);
+ if (result != VK_SUCCESS) {
+ munmap(map, size);
+ return result;
+ }
+
+ struct anv_mmap_cleanup *cleanup = u_vector_add(&pool->mmap_cleanups);
+ if (!cleanup) {
+ munmap(map, size);
+ anv_device_release_bo(pool->device, new_bo);
+ return vk_error(VK_ERROR_OUT_OF_HOST_MEMORY);
+ }
+ cleanup->map = map;
+ cleanup->size = size;
+
+ /* Now that we mapped the new memory, we can write the new
+ * center_bo_offset back into pool and update pool->map. */
+ pool->center_bo_offset = center_bo_offset;
+ pool->map = map + center_bo_offset;
+
+ pool->bos[pool->nbos++] = new_bo;
+ pool->wrapper_bo.map = new_bo;