#include <amdgpu_drm.h>
#include <xf86drm.h>
#include <stdio.h>
+#include <inttypes.h>
static inline struct amdgpu_winsys_bo *amdgpu_winsys_bo(struct pb_buffer *bo)
{
return (struct amdgpu_winsys_bo *)bo;
}
void amdgpu_bo_destroy(struct pb_buffer *_buf)
{
struct amdgpu_winsys_bo *bo = amdgpu_winsys_bo(_buf);
for (i = 0; i < RING_LAST; i++)
amdgpu_fence_reference(&bo->fence[i], NULL);
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
- bo->ws->allocated_vram -= align(bo->base.size, bo->ws->gart_page_size);
+ bo->ws->allocated_vram -= align64(bo->base.size, bo->ws->gart_page_size);
else if (bo->initial_domain & RADEON_DOMAIN_GTT)
- bo->ws->allocated_gtt -= align(bo->base.size, bo->ws->gart_page_size);
+ bo->ws->allocated_gtt -= align64(bo->base.size, bo->ws->gart_page_size);
FREE(bo);
}
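
For reference, this is roughly the shape of the align64 helper the destroy path switches to. A minimal sketch, assuming a power-of-two alignment (gart_page_size always is); the authoritative definition lives in Mesa's util headers:

   #include <stdint.h>

   /* Round value up to the next multiple of alignment without the 32-bit
    * truncation of the old align(). alignment must be a power of two. */
   static inline uint64_t align64(uint64_t value, uint64_t alignment)
   {
      return (value + alignment - 1) & ~(alignment - 1);
   }
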
static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
- unsigned size,
+ uint64_t size,
unsigned alignment,
unsigned usage,
enum radeon_bo_domain initial_domain,
r = amdgpu_bo_alloc(ws->dev, &request, &buf_handle);
if (r) {
fprintf(stderr, "amdgpu: Failed to allocate a buffer:\n");
- fprintf(stderr, "amdgpu: size : %d bytes\n", size);
- fprintf(stderr, "amdgpu: alignment : %d bytes\n", alignment);
- fprintf(stderr, "amdgpu: domains : %d\n", initial_domain);
+ fprintf(stderr, "amdgpu: size : %"PRIu64" bytes\n", size);
+ fprintf(stderr, "amdgpu: alignment : %u bytes\n", alignment);
+ fprintf(stderr, "amdgpu: domains : %u\n", initial_domain);
goto error_bo_alloc;
}
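
These format-string fixes are what the new <inttypes.h> include at the top is for: passing a uint64_t to printf through "%d" is undefined behavior, and in practice only 32 bits of the value get read. A minimal illustration, with a hypothetical oversized request:

   #include <inttypes.h>
   #include <stdio.h>

   int main(void)
   {
      uint64_t size = 5ull << 30; /* 5 GiB: does not fit in unsigned */
      /* PRIu64 expands to the correct conversion specifier for uint64_t,
       * so the full value is printed instead of a truncated one. */
      printf("amdgpu: size : %" PRIu64 " bytes\n", size);
      return 0;
   }
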
bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
if (initial_domain & RADEON_DOMAIN_VRAM)
- ws->allocated_vram += align(size, ws->gart_page_size);
+ ws->allocated_vram += align64(size, ws->gart_page_size);
else if (initial_domain & RADEON_DOMAIN_GTT)
- ws->allocated_gtt += align(size, ws->gart_page_size);
+ ws->allocated_gtt += align64(size, ws->gart_page_size);
amdgpu_add_buffer_to_global_list(bo);
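
The unique_id assignment above uses GCC's legacy __sync builtin, which atomically increments the counter and returns its previous value, so concurrent BO creation hands out distinct ids without a lock. A standalone sketch:

   #include <stdio.h>

   static unsigned next_bo_unique_id;

   int main(void)
   {
      /* Each call returns the pre-increment value, so ids never repeat. */
      unsigned a = __sync_fetch_and_add(&next_bo_unique_id, 1); /* 0 */
      unsigned b = __sync_fetch_and_add(&next_bo_unique_id, 1); /* 1 */
      printf("%u %u\n", a, b);
      return 0;
   }
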
static struct pb_buffer *
amdgpu_bo_create(struct radeon_winsys *rws,
- unsigned size,
+ uint64_t size,
unsigned alignment,
boolean use_reusable_pool,
enum radeon_bo_domain domain,
/* Align size to page size. This is the minimum alignment for normal
* BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
* like constant/uniform buffers, can benefit from better and more reuse.
*/
- size = align(size, ws->gart_page_size);
+ size = align64(size, ws->gart_page_size);
/* Only set one usage bit each for domains and flags, or the cache manager
* might consider different sets of domains / flags compatible */
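
The align64() switch above is the core of the patch: with the old 32-bit align(), a size above 4 GiB wrapped around before alignment even happened. A hypothetical repro, with align32() standing in for the old helper:

   #include <inttypes.h>
   #include <stdio.h>

   /* Stand-in for the old align(): the unsigned parameter is the bug. */
   static unsigned align32(unsigned value, unsigned alignment)
   {
      return (value + alignment - 1) & ~(alignment - 1);
   }

   int main(void)
   {
      uint64_t size = 5ull << 30;           /* a 5 GiB request */
      /* The argument is silently truncated to 1 GiB on the way in. */
      printf("%u\n", align32(size, 4096));  /* prints 1073741824 */
      printf("%" PRIu64 "\n", size);        /* intended 5368709120 */
      return 0;
   }
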
*offset = whandle->offset;
if (bo->initial_domain & RADEON_DOMAIN_VRAM)
- ws->allocated_vram += align(bo->base.size, ws->gart_page_size);
+ ws->allocated_vram += align64(bo->base.size, ws->gart_page_size);
else if (bo->initial_domain & RADEON_DOMAIN_GTT)
- ws->allocated_gtt += align(bo->base.size, ws->gart_page_size);
+ ws->allocated_gtt += align64(bo->base.size, ws->gart_page_size);
amdgpu_add_buffer_to_global_list(bo);
}
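
The handle-import path above charges the BO to the right heap by testing initial_domain with '&' rather than '==', since the field is a bitmask and a buffer may be allowed in several placements. A sketch of the bits involved, with assumed values mirroring the kernel's GEM domain flags:

   /* Assumed layout; see radeon_winsys.h for the authoritative enum. */
   enum radeon_bo_domain {
      RADEON_DOMAIN_GTT  = 0x2,
      RADEON_DOMAIN_VRAM = 0x4,
      RADEON_DOMAIN_VRAM_GTT = RADEON_DOMAIN_VRAM | RADEON_DOMAIN_GTT
   };
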
static struct pb_buffer *amdgpu_bo_from_ptr(struct radeon_winsys *rws,
- void *pointer, unsigned size)
+ void *pointer, uint64_t size)
{
struct amdgpu_winsys *ws = amdgpu_winsys(rws);
amdgpu_bo_handle buf_handle;
bo->initial_domain = RADEON_DOMAIN_GTT;
bo->unique_id = __sync_fetch_and_add(&ws->next_bo_unique_id, 1);
- ws->allocated_gtt += align(bo->base.size, ws->gart_page_size);
+ ws->allocated_gtt += align64(bo->base.size, ws->gart_page_size);
amdgpu_add_buffer_to_global_list(bo);
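
User pointers imported here are pinned into GTT, hence the allocated_gtt bump. The underlying libdrm_amdgpu entry point already takes a 64-bit size, so widening this wrapper's parameter removes the only narrowing on the path. A sketch of the call, assuming libdrm's amdgpu.h declaration:

   #include <amdgpu.h>
   #include <stdint.h>

   /* Sketch: register user memory as a GTT-backed BO via libdrm_amdgpu. */
   static amdgpu_bo_handle import_user_ptr(amdgpu_device_handle dev,
                                           void *cpu, uint64_t size)
   {
      amdgpu_bo_handle buf_handle;

      if (amdgpu_create_bo_from_user_mem(dev, cpu, size, &buf_handle))
         return NULL;
      return buf_handle;
   }
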
static struct pb_buffer *
radeon_winsys_bo_create(struct radeon_winsys *rws,
- unsigned size,
+ uint64_t size,
unsigned alignment,
boolean use_reusable_pool,
enum radeon_bo_domain domain,
struct radeon_bo *bo;
unsigned usage = 0;
+ /* Only 32-bit sizes are supported. */
+ if (size > UINT_MAX)
+ return NULL;
+
/* Align size to page size. This is the minimum alignment for normal
* BOs. Aligning this here helps the cached bufmgr. Especially small BOs,
* like constant/uniform buffers, can benefit from better and more reuse.
*/
}
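
Unlike the amdgpu winsys, the radeon winsys keeps 32-bit sizes internally, so the widened parameter is range-checked up front (UINT_MAX comes from <limits.h>) instead of being threaded through. A minimal sketch of the guard's intent:

   #include <limits.h>
   #include <stdint.h>
   #include <stdio.h>

   /* Oversized requests must be rejected, not truncated, before they
    * reach the winsys' 32-bit internals. */
   static int size_fits_32(uint64_t size)
   {
      return size <= UINT_MAX;
   }

   int main(void)
   {
      printf("%d\n", size_fits_32(1ull << 20)); /* 1: 1 MiB is fine */
      printf("%d\n", size_fits_32(5ull << 30)); /* 0: 5 GiB rejected */
      return 0;
   }
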
static struct pb_buffer *radeon_winsys_bo_from_ptr(struct radeon_winsys *rws,
- void *pointer, unsigned size)
+ void *pointer, uint64_t size)
{
struct radeon_drm_winsys *ws = radeon_drm_winsys(rws);
struct drm_radeon_gem_userptr args;
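
The function goes on to fill the kernel's userptr ioctl arguments; the UAPI struct carries the size as a 64-bit field, which is why the wrapper's parameter can safely widen to uint64_t. A sketch of the fill, assuming the drm_radeon_gem_userptr layout from radeon_drm.h:

   #include <stdint.h>
   #include <string.h>
   #include <radeon_drm.h>

   /* Assumed UAPI layout: addr and size are both __u64. */
   static void fill_userptr_args(struct drm_radeon_gem_userptr *args,
                                 void *pointer, uint64_t size)
   {
      memset(args, 0, sizeof(*args));
      args->addr = (uintptr_t)pointer;
      args->size = size;
   }
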