if (!device->trace_bo)
return;
- va = device->ws->buffer_get_va(device->trace_bo);
+ va = radv_buffer_get_va(device->trace_bo);
if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)
va += 4;
if (!device->trace_bo)
return;
- va = device->ws->buffer_get_va(device->trace_bo);
+ va = radv_buffer_get_va(device->trace_bo);
switch (ring) {
case RING_GFX:
if (!device->trace_bo)
return;
- va = device->ws->buffer_get_va(device->trace_bo) + 24;
+ va = radv_buffer_get_va(device->trace_bo) + 24;
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(device->ws,
cmd_buffer->cs, 4 + MAX_SETS * 2);
struct ac_vs_output_info *outinfo)
{
struct radeon_winsys *ws = cmd_buffer->device->ws;
- uint64_t va = ws->buffer_get_va(shader->bo) + shader->bo_offset;
+ uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
unsigned export_count;
ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
struct ac_es_output_info *outinfo)
{
struct radeon_winsys *ws = cmd_buffer->device->ws;
- uint64_t va = ws->buffer_get_va(shader->bo) + shader->bo_offset;
+ uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
radv_emit_prefetch(cmd_buffer, va, shader->code_size);
struct radv_shader_variant *shader)
{
struct radeon_winsys *ws = cmd_buffer->device->ws;
- uint64_t va = ws->buffer_get_va(shader->bo) + shader->bo_offset;
+ uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
uint32_t rsrc2 = shader->rsrc2;
ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
struct radv_shader_variant *shader)
{
struct radeon_winsys *ws = cmd_buffer->device->ws;
- uint64_t va = ws->buffer_get_va(shader->bo) + shader->bo_offset;
+ uint64_t va = radv_buffer_get_va(shader->bo) + shader->bo_offset;
ws->cs_add_buffer(cmd_buffer->cs, shader->bo, 8);
radv_emit_prefetch(cmd_buffer, va, shader->code_size);
S_028B90_CNT(MIN2(gs_num_invocations, 127)) |
S_028B90_ENABLE(gs_num_invocations > 0));
- va = ws->buffer_get_va(gs->bo) + gs->bo_offset;
+ va = radv_buffer_get_va(gs->bo) + gs->bo_offset;
ws->cs_add_buffer(cmd_buffer->cs, gs->bo, 8);
radv_emit_prefetch(cmd_buffer, va, gs->code_size);
assert (pipeline->shaders[MESA_SHADER_FRAGMENT]);
ps = pipeline->shaders[MESA_SHADER_FRAGMENT];
- va = ws->buffer_get_va(ps->bo) + ps->bo_offset;
+ va = radv_buffer_get_va(ps->bo) + ps->bo_offset;
ws->cs_add_buffer(cmd_buffer->cs, ps->bo, 8);
radv_emit_prefetch(cmd_buffer, va, ps->code_size);
VkClearDepthStencilValue ds_clear_value,
VkImageAspectFlags aspects)
{
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
+ uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->clear_value_offset;
unsigned reg_offset = 0, reg_count = 0;
radv_load_depth_clear_regs(struct radv_cmd_buffer *cmd_buffer,
struct radv_image *image)
{
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
+ uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->clear_value_offset;
if (!image->surface.htile_size)
bool value)
{
uint64_t pred_val = value;
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
+ uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->dcc_pred_offset;
if (!image->surface.dcc_size)
int idx,
uint32_t color_values[2])
{
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
+ uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->clear_value_offset;
if (!image->cmask.size && !image->surface.dcc_size)
struct radv_image *image,
int idx)
{
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(image->bo);
+ uint64_t va = radv_buffer_get_va(image->bo);
va += image->offset + image->clear_value_offset;
if (!image->cmask.size && !image->surface.dcc_size)
&bo_offset))
return;
- set->va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
+ set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
set->va += bo_offset;
}
uptr[1] = set_va >> 32;
}
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
+ uint64_t va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
va += offset;
if (cmd_buffer->state.pipeline) {
memcpy((char*)ptr + layout->push_constant_size, cmd_buffer->dynamic_buffers,
16 * layout->dynamic_offset_count);
- va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
+ va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
va += offset;
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(cmd_buffer->device->ws,
uint32_t stride = cmd_buffer->state.pipeline->binding_stride[vb];
device->ws->cs_add_buffer(cmd_buffer->cs, buffer->bo, 8);
- va = device->ws->buffer_get_va(buffer->bo);
+ va = radv_buffer_get_va(buffer->bo);
offset = cmd_buffer->state.vertex_bindings[vb].offset + velems->offset[i];
va += offset + buffer->offset;
desc[3] = velems->rsrc_word3[i];
}
- va = device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
+ va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
va += vb_offset;
radv_emit_userdata_address(cmd_buffer, cmd_buffer->state.pipeline, MESA_SHADER_VERTEX,
{
struct radv_device *device = cmd_buffer->device;
if (device->gfx_init) {
- uint64_t va = device->ws->buffer_get_va(device->gfx_init);
+ uint64_t va = radv_buffer_get_va(device->gfx_init);
device->ws->cs_add_buffer(cmd_buffer->cs, device->gfx_init, 8);
radeon_emit(cmd_buffer->cs, PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0));
radeon_emit(cmd_buffer->cs, va);
RADV_FROM_HANDLE(radv_buffer, index_buffer, buffer);
cmd_buffer->state.index_type = indexType; /* vk matches hw */
- cmd_buffer->state.index_va = cmd_buffer->device->ws->buffer_get_va(index_buffer->bo);
+ cmd_buffer->state.index_va = radv_buffer_get_va(index_buffer->bo);
cmd_buffer->state.index_va += index_buffer->offset + offset;
int index_size_shift = cmd_buffer->state.index_type ? 2 : 1;
(void**) &push_set->mapped_ptr))
return;
- push_set->va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
+ push_set->va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
push_set->va += bo_offset;
radv_update_descriptor_sets(cmd_buffer->device, cmd_buffer,
cmd_buffer->state.emitted_compute_pipeline = pipeline;
compute_shader = pipeline->shaders[MESA_SHADER_COMPUTE];
- va = ws->buffer_get_va(compute_shader->bo) + compute_shader->bo_offset;
+ va = radv_buffer_get_va(compute_shader->bo) + compute_shader->bo_offset;
ws->cs_add_buffer(cmd_buffer->cs, compute_shader->bo, 8);
radv_emit_prefetch(cmd_buffer, va, compute_shader->code_size);
RADV_FROM_HANDLE(radv_buffer, count_buffer, _count_buffer);
struct radeon_winsys_cs *cs = cmd_buffer->cs;
- uint64_t indirect_va = cmd_buffer->device->ws->buffer_get_va(buffer->bo);
+ uint64_t indirect_va = radv_buffer_get_va(buffer->bo);
indirect_va += offset + buffer->offset;
uint64_t count_va = 0;
if (count_buffer) {
- count_va = cmd_buffer->device->ws->buffer_get_va(count_buffer->bo);
+ count_va = radv_buffer_get_va(count_buffer->bo);
count_va += count_offset + count_buffer->offset;
}
MAYBE_UNUSED unsigned cdw_max = radeon_check_space(ws, cs, 25);
if (info->indirect) {
- uint64_t va = ws->buffer_get_va(info->indirect->bo);
+ uint64_t va = radv_buffer_get_va(info->indirect->bo);
va += info->indirect->offset + info->indirect_offset;
unsigned value)
{
struct radeon_winsys_cs *cs = cmd_buffer->cs;
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(event->bo);
+ uint64_t va = radv_buffer_get_va(event->bo);
cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8);
for (unsigned i = 0; i < eventCount; ++i) {
RADV_FROM_HANDLE(radv_event, event, pEvents[i]);
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(event->bo);
+ uint64_t va = radv_buffer_get_va(event->bo);
cmd_buffer->device->ws->cs_add_buffer(cs, event->bo, 8);
if (!shader)
return;
- start_addr = device->ws->buffer_get_va(shader->bo) + shader->bo_offset;
+ start_addr = radv_buffer_get_va(shader->bo) + shader->bo_offset;
end_addr = start_addr + shader->code_size;
/* See if any wave executes the shader. */
if (pool->current_offset + layout_size <= pool->size) {
set->bo = pool->bo;
set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + pool->current_offset);
- set->va = device->ws->buffer_get_va(set->bo) + pool->current_offset;
+ set->va = radv_buffer_get_va(set->bo) + pool->current_offset;
pool->current_offset += layout_size;
list_addtail(&set->vram_list, &pool->vram_list);
} else if (!pool->host_memory_base) {
}
set->bo = pool->bo;
set->mapped_ptr = (uint32_t*)(pool->mapped_ptr + offset);
- set->va = device->ws->buffer_get_va(set->bo) + offset;
+ set->va = radv_buffer_get_va(set->bo) + offset;
list_add(&set->vram_list, prev);
} else
return vk_error(VK_ERROR_OUT_OF_POOL_MEMORY_KHR);
const VkDescriptorBufferInfo *buffer_info)
{
RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
- uint64_t va = device->ws->buffer_get_va(buffer->bo);
+ uint64_t va = radv_buffer_get_va(buffer->bo);
uint32_t range = buffer_info->range;
if (buffer_info->range == VK_WHOLE_SIZE)
const VkDescriptorBufferInfo *buffer_info)
{
RADV_FROM_HANDLE(radv_buffer, buffer, buffer_info->buffer);
- uint64_t va = device->ws->buffer_get_va(buffer->bo);
+ uint64_t va = radv_buffer_get_va(buffer->bo);
unsigned size = buffer_info->range;
if (buffer_info->range == VK_WHOLE_SIZE)
uint32_t *desc = &map[4];
if (esgs_ring_bo)
- esgs_va = queue->device->ws->buffer_get_va(esgs_ring_bo);
+ esgs_va = radv_buffer_get_va(esgs_ring_bo);
if (gsvs_ring_bo)
- gsvs_va = queue->device->ws->buffer_get_va(gsvs_ring_bo);
+ gsvs_va = radv_buffer_get_va(gsvs_ring_bo);
if (tess_factor_ring_bo)
- tess_factor_va = queue->device->ws->buffer_get_va(tess_factor_ring_bo);
+ tess_factor_va = radv_buffer_get_va(tess_factor_ring_bo);
if (tess_offchip_ring_bo)
- tess_offchip_va = queue->device->ws->buffer_get_va(tess_offchip_ring_bo);
+ tess_offchip_va = radv_buffer_get_va(tess_offchip_ring_bo);
/* stride 0, num records - size, add tid, swizzle, elsize4,
index stride 64 */
uint32_t *map = (uint32_t*)queue->device->ws->buffer_map(descriptor_bo);
if (scratch_bo) {
- uint64_t scratch_va = queue->device->ws->buffer_get_va(scratch_bo);
+ uint64_t scratch_va = radv_buffer_get_va(scratch_bo);
uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
S_008F04_SWIZZLE_ENABLE(1);
map[0] = scratch_va;
}
if (tess_factor_ring_bo) {
- uint64_t tf_va = queue->device->ws->buffer_get_va(tess_factor_ring_bo);
+ uint64_t tf_va = radv_buffer_get_va(tess_factor_ring_bo);
if (queue->device->physical_device->rad_info.chip_class >= CIK) {
radeon_set_uconfig_reg(cs, R_030938_VGT_TF_RING_SIZE,
S_030938_SIZE(tess_factor_ring_size / 4));
R_00B430_SPI_SHADER_USER_DATA_HS_0,
R_00B530_SPI_SHADER_USER_DATA_LS_0};
- uint64_t va = queue->device->ws->buffer_get_va(descriptor_bo);
+ uint64_t va = radv_buffer_get_va(descriptor_bo);
for (int i = 0; i < ARRAY_SIZE(regs); ++i) {
radeon_set_sh_reg_seq(cs, regs[i], 2);
}
if (compute_scratch_bo) {
- uint64_t scratch_va = queue->device->ws->buffer_get_va(compute_scratch_bo);
+ uint64_t scratch_va = radv_buffer_get_va(compute_scratch_bo);
uint32_t rsrc1 = S_008F04_BASE_ADDRESS_HI(scratch_va >> 32) |
S_008F04_SWIZZLE_ENABLE(1);
/* Intensity is implemented as Red, so treat it that way. */
cb->cb_color_attrib = S_028C74_FORCE_DST_ALPHA_1(desc->swizzle[3] == VK_SWIZZLE_1);
- va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
+ va = radv_buffer_get_va(iview->bo) + iview->image->offset;
cb->cb_color_base = va >> 8;
}
/* CMASK variables */
- va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
+ va = radv_buffer_get_va(iview->bo) + iview->image->offset;
va += iview->image->cmask.offset;
cb->cb_color_cmask = va >> 8;
- va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
+ va = radv_buffer_get_va(iview->bo) + iview->image->offset;
va += iview->image->dcc_offset;
cb->cb_dcc_base = va >> 8;
cb->cb_dcc_base |= iview->image->surface.tile_swizzle;
}
if (iview->image->fmask.size) {
- va = device->ws->buffer_get_va(iview->bo) + iview->image->offset + iview->image->fmask.offset;
+ va = radv_buffer_get_va(iview->bo) + iview->image->offset + iview->image->fmask.offset;
cb->cb_color_fmask = va >> 8;
cb->cb_color_fmask |= iview->image->fmask.tile_swizzle;
} else {
ds->db_htile_data_base = 0;
ds->db_htile_surface = 0;
- va = device->ws->buffer_get_va(iview->bo) + iview->image->offset;
+ va = radv_buffer_get_va(iview->bo) + iview->image->offset;
s_offs = z_offs = va;
if (device->physical_device->rad_info.chip_class >= GFX9) {
if (!iview->image->surface.has_stencil)
/* Use all of the htile_buffer for depth if there's no stencil. */
ds->db_stencil_info |= S_02803C_TILE_STENCIL_DISABLE(1);
- va = device->ws->buffer_get_va(iview->bo) + iview->image->offset +
+ va = radv_buffer_get_va(iview->bo) + iview->image->offset +
iview->image->htile_offset;
ds->db_htile_data_base = va >> 8;
ds->db_htile_surface = S_028ABC_FULL_CACHE(1) |
/* Use all of the htile_buffer for depth if there's no stencil. */
ds->db_stencil_info |= S_028044_TILE_STENCIL_DISABLE(1);
- va = device->ws->buffer_get_va(iview->bo) + iview->image->offset +
+ va = radv_buffer_get_va(iview->bo) + iview->image->offset +
iview->image->htile_offset;
ds->db_htile_data_base = va >> 8;
ds->db_htile_surface = S_028ABC_FULL_CACHE(1);
{
const struct vk_format_description *desc;
unsigned stride;
- uint64_t gpu_address = device->ws->buffer_get_va(buffer->bo);
+ uint64_t gpu_address = radv_buffer_get_va(buffer->bo);
uint64_t va = gpu_address + buffer->offset;
unsigned num_format, data_format;
int first_non_void;
unsigned block_width, bool is_stencil,
uint32_t *state)
{
- uint64_t gpu_address = image->bo ? device->ws->buffer_get_va(image->bo) + image->offset : 0;
+ uint64_t gpu_address = image->bo ? radv_buffer_get_va(image->bo) + image->offset : 0;
uint64_t va = gpu_address;
enum chip_class chip_class = device->physical_device->rad_info.chip_class;
uint64_t meta_va = 0;
/* Initialize the sampler view for FMASK. */
if (image->fmask.size) {
uint32_t fmask_format, num_format;
- uint64_t gpu_address = device->ws->buffer_get_va(image->bo);
+ uint64_t gpu_address = radv_buffer_get_va(image->bo);
uint64_t va;
va = gpu_address + image->offset + image->fmask.offset;
if (size >= 4096)
fill_buffer_shader(cmd_buffer, bo, offset, size, value);
else if (size) {
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(bo);
+ uint64_t va = radv_buffer_get_va(bo);
va += offset;
cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, bo, 8);
si_cp_dma_clear_buffer(cmd_buffer, va, size, value);
copy_buffer_shader(cmd_buffer, src_bo, dst_bo,
src_offset, dst_offset, size);
else if (size) {
- uint64_t src_va = cmd_buffer->device->ws->buffer_get_va(src_bo);
- uint64_t dst_va = cmd_buffer->device->ws->buffer_get_va(dst_bo);
+ uint64_t src_va = radv_buffer_get_va(src_bo);
+ uint64_t dst_va = radv_buffer_get_va(dst_bo);
src_va += src_offset;
dst_va += dst_offset;
RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);
uint64_t words = dataSize / 4;
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(dst_buffer->bo);
+ uint64_t va = radv_buffer_get_va(dst_buffer->bo);
va += dstOffset + dst_buffer->offset;
assert(!(dataSize & 3));
uint64_t va = 0;
if (value) {
- va = cmd_buffer->device->ws->buffer_get_va(image->bo) + image->offset;
+ va = radv_buffer_get_va(image->bo) + image->offset;
va += image->dcc_pred_offset;
}
RADV_FROM_HANDLE(radv_buffer, dst_buffer, dstBuffer);
struct radeon_winsys_cs *cs = cmd_buffer->cs;
unsigned elem_size = (flags & VK_QUERY_RESULT_64_BIT) ? 8 : 4;
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
- uint64_t dest_va = cmd_buffer->device->ws->buffer_get_va(dst_buffer->bo);
+ uint64_t va = radv_buffer_get_va(pool->bo);
+ uint64_t dest_va = radv_buffer_get_va(dst_buffer->bo);
dest_va += dst_buffer->offset + dstOffset;
cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, pool->bo, 8);
{
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
+ uint64_t va = radv_buffer_get_va(pool->bo);
cmd_buffer->device->ws->cs_add_buffer(cmd_buffer->cs, pool->bo, 8);
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
struct radeon_winsys_cs *cs = cmd_buffer->cs;
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
+ uint64_t va = radv_buffer_get_va(pool->bo);
va += pool->stride * query;
cmd_buffer->device->ws->cs_add_buffer(cs, pool->bo, 8);
RADV_FROM_HANDLE(radv_cmd_buffer, cmd_buffer, commandBuffer);
RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
struct radeon_winsys_cs *cs = cmd_buffer->cs;
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
+ uint64_t va = radv_buffer_get_va(pool->bo);
uint64_t avail_va = va + pool->availability_offset + 4 * query;
va += pool->stride * query;
RADV_FROM_HANDLE(radv_query_pool, pool, queryPool);
bool mec = radv_cmd_buffer_uses_mec(cmd_buffer);
struct radeon_winsys_cs *cs = cmd_buffer->cs;
- uint64_t va = cmd_buffer->device->ws->buffer_get_va(pool->bo);
+ uint64_t va = radv_buffer_get_va(pool->bo);
uint64_t avail_va = va + pool->availability_offset + 4 * query;
uint64_t query_va = va + pool->stride * query;
};
uint32_t syncobj_handle;
-struct radeon_winsys_bo;
struct radeon_winsys_fence;
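+/* Base winsys buffer object, defined in common code so the GPU virtual
+ * address can be read directly instead of via a winsys callback. */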
+struct radeon_winsys_bo {
+ uint64_t va;
+};
struct radv_winsys_sem_counts {
uint32_t syncobj_count;
uint32_t sem_count;
void (*buffer_unmap)(struct radeon_winsys_bo *bo);
- uint64_t (*buffer_get_va)(struct radeon_winsys_bo *bo);
-
void (*buffer_set_metadata)(struct radeon_winsys_bo *bo,
struct radeon_bo_metadata *md);
cs->cdw += count;
}
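+
+/* Return the GPU virtual address of a winsys buffer object. The VA is
+ * stored in struct radeon_winsys_bo, so no buffer_get_va() callback is
+ * needed anymore. */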
+static inline uint64_t radv_buffer_get_va(struct radeon_winsys_bo *bo)
+{
+ return bo->va;
+}
+
#endif /* RADV_RADEON_WINSYS_H */
uint32_t *ptr = NULL;
uint64_t va = 0;
if (chip_class == GFX9) {
- va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->gfx9_fence_bo) + cmd_buffer->gfx9_fence_offset;
+ va = radv_buffer_get_va(cmd_buffer->gfx9_fence_bo) + cmd_buffer->gfx9_fence_offset;
ptr = &cmd_buffer->gfx9_fence_idx;
}
si_cs_emit_cache_flush(cmd_buffer->cs,
radv_cmd_buffer_upload_alloc(cmd_buffer, buf_size, SI_CPDMA_ALIGNMENT, &offset, &ptr);
- va = cmd_buffer->device->ws->buffer_get_va(cmd_buffer->upload.upload_bo);
+ va = radv_buffer_get_va(cmd_buffer->upload.upload_bo);
va += offset;
si_cp_dma_prepare(cmd_buffer, size, size, &dma_flags);
p_atomic_inc(&range->bo->ref_count);
int r = radv_amdgpu_bo_va_op(bo->ws->dev, range->bo->bo, range->bo_offset, range->size,
- range->offset + bo->va, 0, AMDGPU_VA_OP_MAP);
+ range->offset + bo->base.va, 0, AMDGPU_VA_OP_MAP);
if (r)
abort();
}
return; /* TODO: PRT mapping */
int r = radv_amdgpu_bo_va_op(bo->ws->dev, range->bo->bo, range->bo_offset, range->size,
- range->offset + bo->va, 0, AMDGPU_VA_OP_UNMAP);
+ range->offset + bo->base.va, 0, AMDGPU_VA_OP_UNMAP);
if (r)
abort();
radv_amdgpu_winsys_bo_destroy((struct radeon_winsys_bo *)range->bo);
bo->ws->num_buffers--;
pthread_mutex_unlock(&bo->ws->global_bo_list_lock);
}
- radv_amdgpu_bo_va_op(bo->ws->dev, bo->bo, 0, bo->size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
+ radv_amdgpu_bo_va_op(bo->ws->dev, bo->bo, 0, bo->size, bo->base.va, 0, AMDGPU_VA_OP_UNMAP);
amdgpu_bo_free(bo->bo);
}
amdgpu_va_range_free(bo->va_handle);
if (r)
goto error_va_alloc;
- bo->va = va;
+ bo->base.va = va;
bo->va_handle = va_handle;
bo->size = size;
bo->ws = ws;
return NULL;
}
-static uint64_t radv_amdgpu_winsys_bo_get_va(struct radeon_winsys_bo *_bo)
-{
- struct radv_amdgpu_winsys_bo *bo = radv_amdgpu_winsys_bo(_bo);
- return bo->va;
-}
-
static void *
radv_amdgpu_winsys_bo_map(struct radeon_winsys_bo *_bo)
{
initial |= RADEON_DOMAIN_GTT;
bo->bo = result.buf_handle;
- bo->va = va;
+ bo->base.va = va;
bo->va_handle = va_handle;
bo->initial_domain = initial;
bo->size = result.alloc_size;
{
ws->base.buffer_create = radv_amdgpu_winsys_bo_create;
ws->base.buffer_destroy = radv_amdgpu_winsys_bo_destroy;
- ws->base.buffer_get_va = radv_amdgpu_winsys_bo_get_va;
ws->base.buffer_map = radv_amdgpu_winsys_bo_map;
ws->base.buffer_unmap = radv_amdgpu_winsys_bo_unmap;
ws->base.buffer_from_fd = radv_amdgpu_winsys_bo_from_fd;
};
struct radv_amdgpu_winsys_bo {
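+ /* Base struct; must remain the first member, since common code passes
+  * struct radeon_winsys_bo pointers that are cast back to this type. */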
+ struct radeon_winsys_bo base;
amdgpu_va_handle va_handle;
- uint64_t va;
uint64_t size;
struct radv_amdgpu_winsys *ws;
bool is_virtual;
return NULL;
}
- cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->va;
+ cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
cs->base.buf = (uint32_t *)cs->ib_mapped;
cs->base.max_dw = ib_size / 4 - 4;
cs->ib_size_ptr = &cs->ib.size;
cs->ws->base.cs_add_buffer(&cs->base, cs->ib_buffer, 8);
cs->base.buf[cs->base.cdw++] = PKT3(PKT3_INDIRECT_BUFFER_CIK, 2, 0);
- cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->va;
- cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->va >> 32;
+ cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
+ cs->base.buf[cs->base.cdw++] = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va >> 32;
cs->ib_size_ptr = cs->base.buf + cs->base.cdw;
cs->base.buf[cs->base.cdw++] = S_3F2_CHAIN(1) | S_3F2_VALID(1);
cs->ws->base.buffer_destroy(cs->old_ib_buffers[i]);
cs->num_old_ib_buffers = 0;
- cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->va;
+ cs->ib.ib_mc_address = radv_amdgpu_winsys_bo(cs->ib_buffer)->base.va;
cs->ib_size_ptr = &cs->ib.size;
cs->ib.size = 0;
}
}
ib.size = size;
- ib.ib_mc_address = ws->buffer_get_va(bo);
+ ib.ib_mc_address = radv_buffer_get_va(bo);
request.ip_type = cs0->hw_ip;
request.ring = queue_idx;
bo = (struct radv_amdgpu_winsys_bo*)
(i == cs->num_old_ib_buffers ? cs->ib_buffer : cs->old_ib_buffers[i]);
- if (addr >= bo->va && addr - bo->va < bo->size) {
+ if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0)
- return (char *)ret + (addr - bo->va);
+ return (char *)ret + (addr - bo->base.va);
}
}
if(cs->ws->debug_all_bos) {
pthread_mutex_lock(&cs->ws->global_bo_list_lock);
list_for_each_entry(struct radv_amdgpu_winsys_bo, bo,
&cs->ws->global_bo_list, global_list_item) {
- if (addr >= bo->va && addr - bo->va < bo->size) {
+ if (addr >= bo->base.va && addr - bo->base.va < bo->size) {
if (amdgpu_bo_cpu_map(bo->bo, &ret) == 0) {
pthread_mutex_unlock(&cs->ws->global_bo_list_lock);
- return (char *)ret + (addr - bo->va);
+ return (char *)ret + (addr - bo->base.va);
}
}
}