info->pipe_interleave_bytes =
256 << G_0098F8_PIPE_INTERLEAVE_SIZE_GFX6(amdinfo->gb_addr_cfg);
}
- info->has_virtual_memory = true;
+ info->r600_has_virtual_memory = true;
assert(util_is_power_of_two_or_zero(dma.available_rings + 1));
assert(util_is_power_of_two_or_zero(compute.available_rings + 1));
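These asserts encode the assumption that available_rings is a contiguous mask of low bits (e.g. 0b111 when rings 0-2 are usable), so mask + 1 must be a power of two. A minimal standalone sketch of that invariant, using a local stand-in for Mesa's util_is_power_of_two_or_zero():

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Stand-in for util_is_power_of_two_or_zero(): true for 0 and for any
 * value with a single bit set. */
static bool is_power_of_two_or_zero(uint32_t v)
{
   return (v & (v - 1)) == 0;
}

int main(void)
{
   uint32_t dma_available_rings = 0x3;     /* rings 0 and 1 usable */
   uint32_t compute_available_rings = 0x0; /* no compute rings */

   /* A contiguous low-bit mask plus one is a power of two (and an empty
    * mask gives 1, which also passes). */
   assert(is_power_of_two_or_zero(dma_available_rings + 1));
   assert(is_power_of_two_or_zero(compute_available_rings + 1));
   return 0;
}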
uint32_t min_alloc_size;
uint32_t address32_hi;
bool has_dedicated_vram;
- bool has_virtual_memory;
+ bool r600_has_virtual_memory;
bool gfx_ib_pad_with_type2;
bool has_hw_decode;
unsigned ib_start_alignment;
old_buf = res->buf;
res->buf = new_buf; /* should be atomic */
- if (rscreen->info.has_virtual_memory)
+ if (rscreen->info.r600_has_virtual_memory)
res->gpu_address = rscreen->ws->buffer_get_virtual_address(res->buf);
else
res->gpu_address = 0;
return NULL;
}
- if (rscreen->info.has_virtual_memory)
+ if (rscreen->info.r600_has_virtual_memory)
rbuffer->gpu_address =
ws->buffer_get_virtual_address(rbuffer->buf);
else
enum radeon_bo_priority priority)
{
struct radeon_winsys_cs *cs = ring->cs;
- bool has_vm = ((struct r600_common_screen*)rctx->b.screen)->info.has_virtual_memory;
+ bool has_vm = ((struct r600_common_screen*)rctx->b.screen)->info.r600_has_virtual_memory;
unsigned reloc = radeon_add_to_buffer_list(rctx, ring, rbo, usage, priority);
if (!has_vm) {
{
unsigned dwords = 6;
- if (!screen->info.has_virtual_memory)
+ if (!screen->info.r600_has_virtual_memory)
dwords += 2;
return dwords;
/* If GPUVM is not supported, the CS checker needs 2 entries
* in the buffer list per packet, which has to be done manually.
*/
- if (ctx->screen->info.has_virtual_memory) {
+ if (ctx->screen->info.r600_has_virtual_memory) {
if (dst)
radeon_add_to_buffer_list(ctx, &ctx->dma, dst,
RADEON_USAGE_WRITE,
(int)DIV_ROUND_UP(rscreen->info.max_alloc_size, 1024*1024));
printf("min_alloc_size = %u\n", rscreen->info.min_alloc_size);
printf("has_dedicated_vram = %u\n", rscreen->info.has_dedicated_vram);
- printf("has_virtual_memory = %i\n", rscreen->info.has_virtual_memory);
+ printf("r600_has_virtual_memory = %i\n", rscreen->info.r600_has_virtual_memory);
printf("gfx_ib_pad_with_type2 = %i\n", rscreen->info.gfx_ib_pad_with_type2);
printf("has_hw_decode = %u\n", rscreen->info.has_hw_decode);
printf("num_sdma_rings = %i\n", rscreen->info.num_sdma_rings);
if (bo->u.real.ptr)
os_munmap(bo->u.real.ptr, bo->base.size);
- if (rws->info.has_virtual_memory) {
+ if (rws->info.r600_has_virtual_memory) {
if (rws->va_unmap_working) {
struct drm_radeon_gem_va va;
heap);
}
- if (rws->info.has_virtual_memory) {
+ if (rws->info.r600_has_virtual_memory) {
struct drm_radeon_gem_va va;
unsigned va_gap_size;
/* Sub-allocate small buffers from slabs. */
if (!(flags & RADEON_FLAG_NO_SUBALLOC) &&
size <= (1 << RADEON_SLAB_MAX_SIZE_LOG2) &&
- ws->info.has_virtual_memory &&
+ ws->info.r600_has_virtual_memory &&
alignment <= MAX2(1 << RADEON_SLAB_MIN_SIZE_LOG2, util_next_power_of_two(size))) {
struct pb_slab_entry *entry;
int heap = radeon_get_heap_index(domain, flags);
bo = radeon_create_bo(ws, size, alignment, domain, flags, heap);
if (!bo) {
/* Clear the cache and try again. */
- if (ws->info.has_virtual_memory)
+ if (ws->info.r600_has_virtual_memory)
pb_slabs_reclaim(&ws->bo_slabs);
pb_cache_release_all_buffers(&ws->bo_cache);
bo = radeon_create_bo(ws, size, alignment, domain, flags, heap);
mtx_unlock(&ws->bo_handles_mutex);
- if (ws->info.has_virtual_memory) {
+ if (ws->info.r600_has_virtual_memory) {
struct drm_radeon_gem_va va;
bo->va = radeon_bomgr_find_va64(ws, bo->base.size, 1 << 20);
if (offset)
*offset = whandle->offset;
- if (ws->info.has_virtual_memory && !bo->va) {
+ if (ws->info.r600_has_virtual_memory && !bo->va) {
struct drm_radeon_gem_va va;
bo->va = radeon_bomgr_find_va64(ws, bo->base.size, 1 << 20);
* This doesn't have to be done if virtual memory is enabled,
* because there is no offset patching with virtual memory.
*/
- if (cs->ring_type != RING_DMA || cs->ws->info.has_virtual_memory) {
+ if (cs->ring_type != RING_DMA || cs->ws->info.r600_has_virtual_memory) {
return i;
}
}
cs->cst->flags[0] = 0;
cs->cst->flags[1] = RADEON_CS_RING_DMA;
cs->cst->cs.num_chunks = 3;
- if (cs->ws->info.has_virtual_memory) {
+ if (cs->ws->info.r600_has_virtual_memory) {
cs->cst->flags[0] |= RADEON_CS_USE_VM;
}
break;
cs->cst->flags[1] = RADEON_CS_RING_GFX;
cs->cst->cs.num_chunks = 3;
- if (cs->ws->info.has_virtual_memory) {
+ if (cs->ws->info.r600_has_virtual_memory) {
cs->cst->flags[0] |= RADEON_CS_USE_VM;
cs->cst->cs.num_chunks = 3;
}
radeon_get_drm_value(ws->fd, RADEON_INFO_SI_BACKEND_ENABLED_MASK, NULL,
&ws->info.enabled_rb_mask);
- ws->info.has_virtual_memory = false;
+ ws->info.r600_has_virtual_memory = false;
if (ws->info.drm_minor >= 13) {
uint32_t ib_vm_max_size;
- ws->info.has_virtual_memory = true;
+ ws->info.r600_has_virtual_memory = true;
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_VA_START, NULL,
&ws->va_start))
- ws->info.has_virtual_memory = false;
+ ws->info.r600_has_virtual_memory = false;
if (!radeon_get_drm_value(ws->fd, RADEON_INFO_IB_VM_MAX_SIZE, NULL,
&ib_vm_max_size))
- ws->info.has_virtual_memory = false;
+ ws->info.r600_has_virtual_memory = false;
radeon_get_drm_value(ws->fd, RADEON_INFO_VA_UNMAP_WORKING, NULL,
&ws->va_unmap_working);
}
if (ws->gen == DRV_R600 && !debug_get_bool_option("RADEON_VA", false))
- ws->info.has_virtual_memory = false;
+ ws->info.r600_has_virtual_memory = false;
}
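The VA probing above goes through radeon_get_drm_value(); presumably this is a thin wrapper over the DRM_RADEON_INFO ioctl. A hedged sketch under that assumption (get_drm_value and its argument list are illustrative, not the exact winsys helper; include paths depend on the libdrm installation):

#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <radeon_drm.h>

/* Ask the kernel for one RADEON_INFO_* value; the kernel writes the result
 * through the user pointer stored in info.value. */
static bool get_drm_value(int fd, unsigned request, uint32_t *out)
{
   struct drm_radeon_info info;

   memset(&info, 0, sizeof(info));
   info.value = (uintptr_t)out;
   info.request = request;

   return drmCommandWriteRead(fd, DRM_RADEON_INFO, &info, sizeof(info)) == 0;
}

/* Usage (fd is an open radeon render node):
 *   uint32_t va_start;
 *   if (!get_drm_value(fd, RADEON_INFO_VA_START, &va_start))
 *      ws->info.r600_has_virtual_memory = false;
 */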
/* Get max pipes; this is only needed for compute shaders. All evergreen+
mtx_destroy(&ws->hyperz_owner_mutex);
mtx_destroy(&ws->cmask_owner_mutex);
- if (ws->info.has_virtual_memory)
+ if (ws->info.r600_has_virtual_memory)
pb_slabs_deinit(&ws->bo_slabs);
pb_cache_deinit(&ws->bo_cache);
radeon_bo_destroy,
radeon_bo_can_reclaim);
- if (ws->info.has_virtual_memory) {
+ if (ws->info.r600_has_virtual_memory) {
/* There is no fundamental obstacle to using slab buffer allocation
* without GPUVM, but enabling it requires making sure that the drivers
* honor the address offset.
return &ws->base;
fail_slab:
- if (ws->info.has_virtual_memory)
+ if (ws->info.r600_has_virtual_memory)
pb_slabs_deinit(&ws->bo_slabs);
fail_cache:
pb_cache_deinit(&ws->bo_cache);