Change block_pool->bo to be a pointer, and update its usage everywhere.
This makes it simpler to switch it to a list of BOs later.
v3:
- Use a static "bos" field in the struct, instead of malloc'ing it.
This will later be changed to a fixed-length array of BOs.
Reviewed-by: Jason Ekstrand <jason@jlekstrand.net>
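
For illustration, here is a minimal, self-contained sketch of the pattern in
plain C. The bo/bos names mirror the patch, but the struct layouts and the
init helper are simplified stand-ins rather than the actual anv types:

    /* Sketch: the pool hands out a `bo` pointer while still owning the
     * storage in an embedded `bos` field, so callers only ever go
     * through pool->bo. Turning `bos` into a fixed-length array later
     * only changes what `bo` points at, not the call sites. */
    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    struct bo {
       uint32_t gem_handle;
       uint64_t size;
    };

    struct block_pool {
       struct bo *bo;   /* What every call site dereferences. */
       struct bo bos;   /* A single BO for now; later an array. */
    };

    static void
    bo_init(struct bo *bo, uint32_t gem_handle, uint64_t size)
    {
       bo->gem_handle = gem_handle;
       bo->size = size;
    }

    static void
    block_pool_init(struct block_pool *pool)
    {
       /* Point at the embedded storage; no malloc needed. */
       pool->bo = &pool->bos;
       bo_init(pool->bo, 0, 0);
    }

    int
    main(void)
    {
       struct block_pool pool;
       block_pool_init(&pool);

       /* Call sites change from pool->bo.size to pool->bo->size. */
       assert(pool.bo->size == 0);
       printf("bo size: %llu\n", (unsigned long long)pool.bo->size);
       return 0;
    }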
pool->bo_flags = bo_flags;
pool->start_address = gen_canonical_address(start_address);
- anv_bo_init(&pool->bo, 0, 0);
+ pool->bo = &pool->bos;
+
+ anv_bo_init(pool->bo, 0, 0);
pool->fd = memfd_create("block pool", MFD_CLOEXEC);
if (pool->fd == -1)
* the EXEC_OBJECT_SUPPORTS_48B_ADDRESS flag and the kernel does all of the
* hard work for us.
*/
- anv_bo_init(&pool->bo, gem_handle, size);
+ anv_bo_init(pool->bo, gem_handle, size);
if (pool->bo_flags & EXEC_OBJECT_PINNED) {
- pool->bo.offset = pool->start_address + BLOCK_POOL_MEMFD_CENTER -
+ pool->bo->offset = pool->start_address + BLOCK_POOL_MEMFD_CENTER -
center_bo_offset;
}
- pool->bo.flags = pool->bo_flags;
- pool->bo.map = map;
+ pool->bo->flags = pool->bo_flags;
+ pool->bo->map = map;
return VK_SUCCESS;
}
void*
anv_block_pool_map(struct anv_block_pool *pool, int32_t offset)
{
- return pool->bo.map + pool->center_bo_offset + offset;
+ return pool->bo->map + pool->center_bo_offset + offset;
}
/** Grows and re-centers the block pool.
assert(state == &pool->state || back_used > 0);
- uint32_t old_size = pool->bo.size;
+ uint32_t old_size = pool->bo->size;
/* The block pool is always initialized to a nonzero size and this function
* is always called after initialization.
while (size < back_required + front_required)
size *= 2;
- assert(size > pool->bo.size);
+ assert(size > pool->bo->size);
/* We compute a new center_bo_offset such that, when we double the size
* of the pool, we maintain the ratio of how much is used by each side.
result = anv_block_pool_expand_range(pool, center_bo_offset, size);
- pool->bo.flags = pool->bo_flags;
+ pool->bo->flags = pool->bo_flags;
done:
pthread_mutex_unlock(&pool->device->mutex);
* needs to do so in order to maintain its concurrency model.
*/
if (state == &pool->state) {
- return pool->bo.size - pool->center_bo_offset;
+ return pool->bo->size - pool->center_bo_offset;
} else {
assert(pool->center_bo_offset > 0);
return pool->center_bo_offset;
{
struct anv_state *bt_block = u_vector_head(&cmd_buffer->bt_block_states);
return (struct anv_address) {
- .bo = &anv_binding_table_pool(cmd_buffer->device)->block_pool.bo,
+ .bo = anv_binding_table_pool(cmd_buffer->device)->block_pool.bo,
.offset = bt_block->offset,
};
}
* relocations that point to the pool bo with the correct offset.
*/
for (size_t i = 0; i < relocs->num_relocs; i++) {
- if (relocs->reloc_bos[i] == &pool->block_pool.bo) {
+ if (relocs->reloc_bos[i] == pool->block_pool.bo) {
/* Adjust the delta value in the relocation to correctly
* correspond to the new delta. Initially, this value may have
* been negative (if treated as unsigned), but we trust in
* given time. The only option is to always relocate them.
*/
anv_reloc_list_apply(cmd_buffer->device, &cmd_buffer->surface_relocs,
- &cmd_buffer->device->surface_state_pool.block_pool.bo,
+ cmd_buffer->device->surface_state_pool.block_pool.bo,
true /* always relocate surface states */);
/* Since we own all of the batch buffers, we know what values are stored
adjust_relocations_from_state_pool(ss_pool, &cmd_buffer->surface_relocs,
cmd_buffer->last_ss_pool_center);
- VkResult result = anv_execbuf_add_bo(execbuf, &ss_pool->block_pool.bo,
+ VkResult result = anv_execbuf_add_bo(execbuf, ss_pool->block_pool.bo,
&cmd_buffer->surface_relocs, 0,
&cmd_buffer->device->alloc);
if (result != VK_SUCCESS)
anv_state_flush(cmd_buffer->device, tmp_data);
struct blorp_address src = {
- .buffer = &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
+ .buffer = cmd_buffer->device->dynamic_state_pool.block_pool.bo,
.offset = tmp_data.offset,
.mocs = cmd_buffer->device->default_mocs,
};
uint64_t bo_flags;
- struct anv_bo bo;
+ struct anv_bo *bo;
+
+ /* A single BO for now */
+ struct anv_bo bos;
/* The address where the start of the pool is pinned. The various bos that
* are created as the pool grows will have addresses in the range
pc.DestinationAddressType = DAT_PPGTT,
pc.PostSyncOperation = WriteImmediateData,
pc.Address = (struct anv_address) {
- &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
+ cmd_buffer->device->dynamic_state_pool.block_pool.bo,
event->state.offset
};
pc.ImmediateData = VK_EVENT_SET;
pc.DestinationAddressType = DAT_PPGTT;
pc.PostSyncOperation = WriteImmediateData;
pc.Address = (struct anv_address) {
- &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
+ cmd_buffer->device->dynamic_state_pool.block_pool.bo,
event->state.offset
};
pc.ImmediateData = VK_EVENT_RESET;
sem.CompareOperation = COMPARE_SAD_EQUAL_SDD,
sem.SemaphoreDataDword = VK_EVENT_SET,
sem.SemaphoreAddress = (struct anv_address) {
- &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
+ cmd_buffer->device->dynamic_state_pool.block_pool.bo,
event->state.offset
};
}
{
struct anv_cmd_buffer *cmd_buffer = batch->driver_batch;
return (struct blorp_address) {
- .buffer = &cmd_buffer->device->surface_state_pool.block_pool.bo,
+ .buffer = cmd_buffer->device->surface_state_pool.block_pool.bo,
.offset = 0,
};
}
anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 64);
*addr = (struct blorp_address) {
- .buffer = &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
+ .buffer = cmd_buffer->device->dynamic_state_pool.block_pool.bo,
.offset = vb_state.offset,
.mocs = cmd_buffer->device->default_mocs,
};
sba.SurfaceStateBaseAddressModifyEnable = true;
sba.DynamicStateBaseAddress =
- (struct anv_address) { &device->dynamic_state_pool.block_pool.bo, 0 };
+ (struct anv_address) { device->dynamic_state_pool.block_pool.bo, 0 };
sba.DynamicStateMOCS = GENX(MOCS);
sba.DynamicStateBaseAddressModifyEnable = true;
sba.IndirectObjectBaseAddressModifyEnable = true;
sba.InstructionBaseAddress =
- (struct anv_address) { &device->instruction_state_pool.block_pool.bo, 0 };
+ (struct anv_address) { device->instruction_state_pool.block_pool.bo, 0 };
sba.InstructionMOCS = GENX(MOCS);
sba.InstructionBaseAddressModifyEnable = true;
assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
struct anv_address ss_clear_addr = {
- .bo = &cmd_buffer->device->surface_state_pool.block_pool.bo,
+ .bo = cmd_buffer->device->surface_state_pool.block_pool.bo,
.offset = surface_state.offset +
cmd_buffer->device->isl_dev.ss.clear_value_offset,
};
* we allocated for them in BeginCommandBuffer.
*/
struct anv_bo *ss_bo =
- &primary->device->surface_state_pool.block_pool.bo;
+ primary->device->surface_state_pool.block_pool.bo;
struct anv_state src_state = primary->state.render_pass_states;
struct anv_state dst_state = secondary->state.render_pass_states;
assert(src_state.alloc_size == dst_state.alloc_size);
anv_cmd_buffer_alloc_surface_state(cmd_buffer);
struct anv_address constant_data = {
- .bo = &pipeline->device->dynamic_state_pool.block_pool.bo,
+ .bo = pipeline->device->dynamic_state_pool.block_pool.bo,
.offset = pipeline->shaders[stage]->constant_data.offset,
};
unsigned constant_data_size =
uint32_t read_len;
if (binding->set == ANV_DESCRIPTOR_SET_SHADER_CONSTANTS) {
struct anv_address constant_data = {
- .bo = &pipeline->device->dynamic_state_pool.block_pool.bo,
+ .bo = pipeline->device->dynamic_state_pool.block_pool.bo,
.offset = pipeline->shaders[stage]->constant_data.offset,
};
unsigned constant_data_size =
if (state.alloc_size > 0) {
c.ConstantBody.Buffer[n] = (struct anv_address) {
- .bo = &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
+ .bo = cmd_buffer->device->dynamic_state_pool.block_pool.bo,
.offset = state.offset,
};
c.ConstantBody.ReadLength[n] =
anv_state_flush(cmd_buffer->device, id_state);
struct anv_address addr = {
- .bo = &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
+ .bo = cmd_buffer->device->dynamic_state_pool.block_pool.bo,
.offset = id_state.offset,
};
anv_state_flush(cmd_buffer->device, state);
struct anv_address addr = {
- .bo = &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
+ .bo = cmd_buffer->device->dynamic_state_pool.block_pool.bo,
.offset = state.offset,
};
sizes[2] = groupCountZ;
anv_state_flush(cmd_buffer->device, state);
cmd_buffer->state.compute.num_workgroups = (struct anv_address) {
- .bo = &cmd_buffer->device->dynamic_state_pool.block_pool.bo,
+ .bo = cmd_buffer->device->dynamic_state_pool.block_pool.bo,
.offset = state.offset,
};
}