VkResult
anv_block_pool_init(struct anv_block_pool *pool,
struct anv_device *device,
+ uint64_t start_address,
uint32_t initial_size,
uint64_t bo_flags)
{
pool->device = device;
pool->bo_flags = bo_flags;
+ pool->start_address = gen_canonical_address(start_address);
+
anv_bo_init(&pool->bo, 0, 0);
pool->fd = memfd_create("block pool", MFD_CLOEXEC);
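
The new start_address is stored in canonical form. gen_canonical_address() comes from the driver's shared Intel code and sign-extends bit 47, since the hardware ignores bits 63:48 and assumes they replicate bit 47. A minimal sketch of that operation (illustration only, not the driver's helper):

    #include <stdint.h>

    /* Sign-extend a 48-bit GPU address from bit 47 so that bits 63:48
     * replicate bit 47 -- the "canonical form" the hardware expects.
     * Relies on arithmetic right shift of a signed value. */
    static inline uint64_t
    canonical_address_sketch(uint64_t v)
    {
       const int shift = 63 - 47;
       return (uint64_t)((int64_t)(v << shift) >> shift);
    }

For example, canonical_address_sketch(0x800000000000ull) yields 0xffff800000000000, while addresses with bit 47 clear pass through unchanged.
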
* hard work for us.
*/
anv_bo_init(&pool->bo, gem_handle, size);
+ if (pool->bo_flags & EXEC_OBJECT_PINNED) {
+ pool->bo.offset = pool->start_address + BLOCK_POOL_MEMFD_CENTER -
+ center_bo_offset;
+ }
pool->bo.flags = pool->bo_flags;
pool->bo.map = map;
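
When the pool's BO is pinned, the address chosen above keeps the pool's "center" fixed at start_address + BLOCK_POOL_MEMFD_CENTER no matter how far the pool has grown downward (center_bo_offset). A standalone sketch of that invariant, using a placeholder value for BLOCK_POOL_MEMFD_CENTER (the real constant lives in anv_allocator.c):

    #include <assert.h>
    #include <stdint.h>

    #define SKETCH_MEMFD_CENTER (1ull << 30) /* placeholder value */

    static uint64_t
    pinned_bo_offset(uint64_t start_address, uint64_t center_bo_offset)
    {
       return start_address + SKETCH_MEMFD_CENTER - center_bo_offset;
    }

    int
    main(void)
    {
       const uint64_t start = 0x000100000000ull; /* hypothetical base */

       /* The GPU address of a block at signed offset +8192 from the center
        * is bo.offset + center_bo_offset + block_offset; it is unchanged
        * after the pool grows 64 KiB downward. */
       uint64_t before = pinned_bo_offset(start, 0)         + 0         + 8192;
       uint64_t after  = pinned_bo_offset(start, 64 * 1024) + 64 * 1024 + 8192;
       assert(before == after);
       return 0;
    }
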
VkResult
anv_state_pool_init(struct anv_state_pool *pool,
struct anv_device *device,
+ uint64_t start_address,
uint32_t block_size,
uint64_t bo_flags)
{
VkResult result = anv_block_pool_init(&pool->block_pool, device,
+ start_address,
block_size * 16,
bo_flags);
if (result != VK_SUCCESS)
return result;
if (result != VK_SUCCESS)
goto fail_batch_bo_pool;
- /* For the state pools we explicitly disable 48bit. */
- bo_flags = (physical_device->has_exec_async ? EXEC_OBJECT_ASYNC : 0) |
- (physical_device->has_exec_capture ? EXEC_OBJECT_CAPTURE : 0);
+ if (physical_device->use_softpin)
+ bo_flags |= EXEC_OBJECT_PINNED;
+ else
+ bo_flags &= ~EXEC_OBJECT_SUPPORTS_48B_ADDRESS;
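
With use_softpin the state pools request EXEC_OBJECT_PINNED; without it, the old behavior is preserved by stripping EXEC_OBJECT_SUPPORTS_48B_ADDRESS so the kernel keeps these BOs below 4 GiB. At submission time a pinned BO reaches i915 roughly as below (assumed plumbing; the driver's real execbuf path is elsewhere). When EXEC_OBJECT_PINNED is set, the kernel uses exec_object2.offset as the required GPU address instead of assigning one:

    #include <stdint.h>
    #include <string.h>
    #include <drm/i915_drm.h>

    /* Sketch: describe one pinned BO to the kernel for execbuffer2. */
    static void
    fill_exec_object(struct drm_i915_gem_exec_object2 *obj,
                     uint32_t gem_handle, uint64_t pinned_address,
                     uint64_t bo_flags)
    {
       memset(obj, 0, sizeof(*obj));
       obj->handle = gem_handle;
       obj->offset = pinned_address; /* e.g. pool->bo.offset from above */
       obj->flags  = bo_flags;       /* includes EXEC_OBJECT_PINNED */
    }
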
- result = anv_state_pool_init(&device->dynamic_state_pool, device, 16384,
+ result = anv_state_pool_init(&device->dynamic_state_pool, device,
+ DYNAMIC_STATE_POOL_MIN_ADDRESS,
+ 16384,
bo_flags);
if (result != VK_SUCCESS)
goto fail_bo_cache;
- result = anv_state_pool_init(&device->instruction_state_pool, device, 16384,
+ result = anv_state_pool_init(&device->instruction_state_pool, device,
+ INSTRUCTION_STATE_POOL_MIN_ADDRESS,
+ 16384,
bo_flags);
if (result != VK_SUCCESS)
goto fail_dynamic_state_pool;
- result = anv_state_pool_init(&device->surface_state_pool, device, 4096,
+ result = anv_state_pool_init(&device->surface_state_pool, device,
+ SURFACE_STATE_POOL_MIN_ADDRESS,
+ 4096,
bo_flags);
if (result != VK_SUCCESS)
goto fail_instruction_state_pool;
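
Each state pool now gets its own fixed virtual-address region. The DYNAMIC_STATE_POOL_MIN_ADDRESS, SURFACE_STATE_POOL_MIN_ADDRESS, and INSTRUCTION_STATE_POOL_MIN_ADDRESS constants are defined in anv_private.h; the values below are placeholders, but they illustrate the constraint implied by the header comment further down: the ranges [start_address, start_address + BLOCK_POOL_MEMFD_SIZE) of different pools must not collide, so consecutive bases sit at least BLOCK_POOL_MEMFD_SIZE apart.

    #include <assert.h>
    #include <stdint.h>

    /* Placeholder values; the real constants live in anv_private.h. */
    #define SKETCH_MEMFD_SIZE      (1ull << 31)
    #define SKETCH_DYNAMIC_MIN     (1ull << 32)
    #define SKETCH_SURFACE_MIN     ((1ull << 32) + (1ull << 31))
    #define SKETCH_INSTRUCTION_MIN (2ull << 32)

    int
    main(void)
    {
       const uint64_t bases[] = {
          SKETCH_DYNAMIC_MIN, SKETCH_SURFACE_MIN, SKETCH_INSTRUCTION_MIN,
       };
       for (unsigned i = 1; i < 3; i++)
          assert(bases[i] - bases[i - 1] >= SKETCH_MEMFD_SIZE);
       return 0;
    }
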
struct anv_bo bo;
+ /* The address where the start of the pool is pinned. The various bos that
+ * are created as the pool grows will have addresses in the range
+ * [start_address, start_address + BLOCK_POOL_MEMFD_SIZE).
+ */
+ uint64_t start_address;
+
/* The offset from the start of the bo to the "center" of the block
* pool. Pointers to allocated blocks are given by
* bo.map + center_bo_offset + offsets.
*/
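
Since block offsets are signed and relative to the pool's center, the CPU pointer for an allocation follows directly from the comment above. A one-line sketch (not the driver's actual helper):

    /* Sketch of the pointer math described in the comment above. */
    static inline void *
    block_pool_map_sketch(void *bo_map, uint32_t center_bo_offset,
                          int32_t offset)
    {
       return (char *)bo_map + center_bo_offset + offset;
    }
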
VkResult anv_block_pool_init(struct anv_block_pool *pool,
struct anv_device *device,
+ uint64_t start_address,
uint32_t initial_size,
uint64_t bo_flags);
void anv_block_pool_finish(struct anv_block_pool *pool);
VkResult anv_state_pool_init(struct anv_state_pool *pool,
struct anv_device *device,
+ uint64_t start_address,
uint32_t block_size,
uint64_t bo_flags);
void anv_state_pool_finish(struct anv_state_pool *pool);
struct anv_block_pool pool;
pthread_mutex_init(&device.mutex, NULL);
- anv_block_pool_init(&pool, &device, 4096, 0);
+ anv_block_pool_init(&pool, &device, 4096, 4096, 0);
for (unsigned i = 0; i < NUM_THREADS; i++) {
jobs[i].pool = &pool;
pthread_mutex_init(&device.mutex, NULL);
for (unsigned i = 0; i < NUM_RUNS; i++) {
- anv_state_pool_init(&state_pool, &device, 256, 0);
+ anv_state_pool_init(&state_pool, &device, 4096, 256, 0);
/* Grab one so a zero offset is impossible */
anv_state_pool_alloc(&state_pool, 16, 16);
struct anv_state_pool state_pool;
pthread_mutex_init(&device.mutex, NULL);
- anv_state_pool_init(&state_pool, &device, 4096, 0);
+ anv_state_pool_init(&state_pool, &device, 4096, 4096, 0);
/* Grab one so a zero offset is impossible */
anv_state_pool_alloc(&state_pool, 16, 16);
struct anv_state_pool state_pool;
pthread_mutex_init(&device.mutex, NULL);
- anv_state_pool_init(&state_pool, &device, 64, 0);
+ anv_state_pool_init(&state_pool, &device, 4096, 64, 0);
pthread_barrier_init(&barrier, NULL, NUM_THREADS);
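
Note that the unit tests pass bo_flags of 0, so EXEC_OBJECT_PINNED is never set and the new start_address argument (4096 in these calls) is only recorded by the pool, never used to pin anything; the tests just need some valid value to satisfy the new signature.
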