enum radeon_bo_usage usage)
{
struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
- struct amdgpu_winsys *ws = bo->rws;
+ struct amdgpu_winsys *ws = bo->ws;
int i;
if (bo->is_shared) {
amdgpu_fence_reference(&bo->fence[i], NULL);
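/* Note: the accounting below debits the counters that amdgpu_create_bo
 * credits; both sides align with the GART page size, keeping the create
 * and destroy paths symmetric. */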
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
- bo->rws->allocated_vram -= align(bo->base.size, bo->rws->gart_page_size);
+ bo->ws->allocated_vram -= align(bo->base.size, bo->ws->gart_page_size);
else if (bo->initial_domain & RADEON_DOMAIN_GTT)
- bo->rws->allocated_gtt -= align(bo->base.size, bo->rws->gart_page_size);
+ bo->ws->allocated_gtt -= align(bo->base.size, bo->ws->gart_page_size);
FREE(bo);
}
RADEON_USAGE_READWRITE);
}
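/* Note (editorial): `time` is presumably sampled with os_time_get_nano()
 * before the wait above; the winsys accumulates the nanoseconds spent
 * stalled on busy buffers. */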
- bo->rws->buffer_wait_time += os_time_get_nano() - time;
+ bo->ws->buffer_wait_time += os_time_get_nano() - time;
}
}
/* other functions are never called */
};
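/* Note (editorial): only the destroy hook is populated; the remaining
 * pb_vtbl entries can stay empty because this winsys exposes its
 * buffer_map/buffer_unmap entry points directly rather than dispatching
 * through the vtbl. */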
-static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *rws,
+static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
unsigned size,
unsigned alignment,
unsigned usage,
return NULL;
}
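/* Note (editorial): pb_cache_init_entry links the buffer into ws->bo_cache,
 * so a later allocation of compatible size and usage can reuse it instead
 * of hitting the kernel again. */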
- pb_cache_init_entry(&rws->bo_cache, &bo->cache_entry, &bo->base);
+ pb_cache_init_entry(&ws->bo_cache, &bo->cache_entry, &bo->base);
request.alloc_size = size;
request.phys_alignment = alignment;
request.flags |= AMDGPU_GEM_CREATE_CPU_GTT_USWC;
}
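/* Note: CPU_GTT_USWC requests a write-combined CPU mapping, the usual
 * choice for buffers the CPU streams into and the GPU reads. */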
- r = amdgpu_bo_alloc(rws->dev, &request, &buf_handle);
+ r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
if (r) {
fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
fprintf(stderr, "amdgpu: size : %u bytes\n", size);
goto error_bo_alloc;
}
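/* Note (editorial): the amdgpu winsys manages GPU virtual addresses in
 * userspace: a VA range is reserved for the buffer here, and the buffer
 * is then bound into it with amdgpu_bo_va_op. */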
- r = amdgpu_va_range_alloc(rws->dev, amdgpu_gpu_va_range_general,
+ r = amdgpu_va_range_alloc(ws->dev, amdgpu_gpu_va_range_general,
size, alignment, 0, &va, &va_handle, 0);
if (r)
goto error_va_alloc;
bo->base.usage = usage;
bo->base.size = size;
bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
- bo->rws = rws;
+ bo->ws = ws;
bo->bo = buf_handle;
bo->va = va;
bo->va_handle = va_handle;
bo->initial_domain = initial_domain;
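/* Note: __sync_fetch_and_add is the GCC atomic builtin, so each buffer
 * gets a winsys-wide unique id without taking a lock. */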
- bo->unique_id = __sync_fetch_and_add(&rws->next_bo_unique_id, 1);
+ bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
if (initial_domain & RADEON_DOMAIN_VRAM)
- rws->allocated_vram += align(size, rws->gart_page_size);
+ ws->allocated_vram += align(size, ws->gart_page_size);
else if (initial_domain & RADEON_DOMAIN_GTT)
- rws->allocated_gtt += align(size, rws->gart_page_size);
+ ws->allocated_gtt += align(size, ws->gart_page_size);
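/* Worked example (editorial): with a 4096-byte GART page, a 6000-byte
 * buffer is accounted as align(6000, 4096) = 8192 bytes, since the kernel
 * backs allocations with whole pages. The GART page size is used for the
 * VRAM counter as well. */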
return bo;
bo->bo = result.buf_handle;
bo->base.size = result.alloc_size;
bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
- bo->rws = ws;
+ bo->ws = ws;
bo->va = va;
bo->va_handle = va_handle;
bo->initial_domain = initial;
bo->base.usage = PB_USAGE_GPU_WRITE | PB_USAGE_GPU_READ;
bo->base.size = size;
bo->base.vtbl = &amdgpu_winsys_bo_vtbl;
- bo->rws = ws;
+ bo->ws = ws;
bo->user_ptr = pointer;
bo->va = va;
bo->va_handle = va_handle;
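/* Note (editorial): recording the user pointer lets buffer_map hand back
 * the CPU address directly for userptr buffers, with no kernel round
 * trip. */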
return key1 != key2;
}
-static bool amdgpu_winsys_unref(struct radeon_winsys *ws)
+static bool amdgpu_winsys_unref(struct radeon_winsys *rws)
{
- struct amdgpu_winsys *rws = (struct amdgpu_winsys*)ws;
+ struct amdgpu_winsys *ws = (struct amdgpu_winsys*)rws;
bool destroy;
/* When the reference counter drops to zero, remove the device pointer
 * from the table. This must happen while the mutex is locked, so that
 * amdgpu_winsys_create in another thread doesn't get the winsys from
 * the table when the counter drops to 0. */
pipe_mutex_lock(dev_tab_mutex);
- destroy = pipe_reference(&rws->reference, NULL);
+ destroy = pipe_reference(&ws->reference, NULL);
if (destroy && dev_tab)
- util_hash_table_remove(dev_tab, rws->dev);
+ util_hash_table_remove(dev_tab, ws->dev);
pipe_mutex_unlock(dev_tab_mutex);
return destroy;
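/* Usage sketch (editorial; hypothetical caller, following the pattern the
 * radeon winsyses use): the screen's destroy path frees the winsys only
 * once the last reference is gone:
 *
 *    if (rws->unref(rws))
 *       rws->destroy(rws);
 */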