                                      unsigned index,
                                      struct pipe_driver_query_info *info)
{
+   struct r600_screen *rscreen = (struct r600_screen*)screen;
    struct pipe_driver_query_info list[] = {
        {"draw-calls", R600_QUERY_DRAW_CALLS, 0},
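+       /* Counters in bytes; the total heap size is used as the maximum. */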
+       {"requested-VRAM", R600_QUERY_REQUESTED_VRAM, rscreen->info.vram_size, TRUE},
+       {"requested-GTT", R600_QUERY_REQUESTED_GTT, rscreen->info.gart_size, TRUE},
    };

    if (!info)
#define R600_CONTEXT_FLUSH_AND_INV_DB_META (1 << 7)
#define R600_QUERY_DRAW_CALLS (PIPE_QUERY_DRIVER_SPECIFIC + 0)
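+/* Memory counters provided by the winsys rather than the GPU. */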
+#define R600_QUERY_REQUESTED_VRAM (PIPE_QUERY_DRIVER_SPECIFIC + 1)
+#define R600_QUERY_REQUESTED_GTT (PIPE_QUERY_DRIVER_SPECIFIC + 2)
struct r600_context;
struct r600_bytecode;
    /* Non-GPU queries. */
    switch (type) {
    case R600_QUERY_DRAW_CALLS:
+   case R600_QUERY_REQUESTED_VRAM:
+   case R600_QUERY_REQUESTED_GTT:
        return NULL;
    }
        break;
    /* Non-GPU queries. */
    case R600_QUERY_DRAW_CALLS:
+   case R600_QUERY_REQUESTED_VRAM:
+   case R600_QUERY_REQUESTED_GTT:
        skip_allocation = true;
        break;
    default:
    case R600_QUERY_DRAW_CALLS:
        rquery->begin_result = rctx->num_draw_calls;
        return;
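+   /* The memory counters are sampled in end_query only; begin from zero. */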
+   case R600_QUERY_REQUESTED_VRAM:
+   case R600_QUERY_REQUESTED_GTT:
+       rquery->begin_result = 0;
+       return;
    }
    /* Discard the old query buffers. */
    case R600_QUERY_DRAW_CALLS:
        rquery->end_result = rctx->num_draw_calls;
        return;
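+   /* Sample the winsys' running totals of requested memory. */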
+   case R600_QUERY_REQUESTED_VRAM:
+       rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_VRAM_MEMORY);
+       return;
+   case R600_QUERY_REQUESTED_GTT:
+       rquery->end_result = rctx->ws->query_value(rctx->ws, RADEON_REQUESTED_GTT_MEMORY);
+       return;
    }
    r600_emit_query_end(rctx, rquery);
    /* Non-GPU queries. */
    switch (query->type) {
    case R600_QUERY_DRAW_CALLS:
+   case R600_QUERY_REQUESTED_VRAM:
+   case R600_QUERY_REQUESTED_GTT:
        result->u64 = query->end_result - query->begin_result;
        return TRUE;
    }
}
    pipe_mutex_destroy(bo->map_mutex);
+
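+   /* Undo the accounting done at allocation, using the same page-aligned size. */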
+   if (bo->initial_domain & RADEON_DOMAIN_VRAM)
+       bo->rws->allocated_vram -= align(bo->base.size, 4096);
+   else if (bo->initial_domain & RADEON_DOMAIN_GTT)
+       bo->rws->allocated_gtt -= align(bo->base.size, 4096);
    FREE(bo);
}
    bo->rws = mgr->rws;
    bo->handle = args.handle;
    bo->va = 0;
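+   /* Remember the domain so destroy can decrement the right counter. */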
+   bo->initial_domain = rdesc->initial_domains;
    pipe_mutex_init(bo->map_mutex);
if (mgr->va) {
}
}
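+   /* Account for the new buffer; sizes are aligned to the 4096-byte page size. */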
+   if (rdesc->initial_domains & RADEON_DOMAIN_VRAM)
+       rws->allocated_vram += align(size, 4096);
+   else if (rdesc->initial_domains & RADEON_DOMAIN_GTT)
+       rws->allocated_gtt += align(size, 4096);
+
    return &bo->base;
}
}
}
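+   /* The actual domain of a buffer opened by handle is unknown at this point;
+    * assume VRAM, which is where shared buffers typically reside. */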
+   ws->allocated_vram += align(open_arg.size, 4096);
+   bo->initial_domain = RADEON_DOMAIN_VRAM;
+
    return (struct pb_buffer*)bo;
fail:
    uint32_t name;
    uint64_t va;
    uint64_t va_size;
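+   /* Domain the buffer was allocated in, for memory-usage accounting. */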
+   enum radeon_bo_domain initial_domain;

    /* how many command streams is this bo referenced in? */
    int num_cs_references;
    return ts;
}
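+
+/* Return a winsys bookkeeping value; currently the running totals of
+ * requested VRAM and GTT memory. */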
+static uint64_t radeon_query_value(struct radeon_winsys *rws,
+                                   enum radeon_value_id value)
+{
+   struct radeon_drm_winsys *ws = (struct radeon_drm_winsys*)rws;
+
+   switch (value) {
+   case RADEON_REQUESTED_VRAM_MEMORY:
+       return ws->allocated_vram;
+   case RADEON_REQUESTED_GTT_MEMORY:
+       return ws->allocated_gtt;
+   }
+   return 0;
+}
+
static unsigned hash_fd(void *key)
{
    return pointer_to_intptr(key);
    ws->base.surface_init = radeon_drm_winsys_surface_init;
    ws->base.surface_best = radeon_drm_winsys_surface_best;
    ws->base.query_timestamp = radeon_query_timestamp;
+   ws->base.query_value = radeon_query_value;

    radeon_bomgr_init_functions(ws);
    radeon_drm_cs_init_functions(ws);
    int fd; /* DRM file descriptor */
    int num_cs; /* The number of command streams created. */
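+   /* Running totals of buffer memory requested per domain, page-aligned. */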
+   uint64_t allocated_vram;
+   uint64_t allocated_gtt;
    enum radeon_generation gen;
    struct radeon_info info;
    RING_LAST,
};
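+
+/* Values that can be queried through radeon_winsys::query_value. */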
+enum radeon_value_id {
+   RADEON_REQUESTED_VRAM_MEMORY,
+   RADEON_REQUESTED_GTT_MEMORY,
+};
+
struct winsys_handle;
struct radeon_winsys_cs_handle;
     * \param ws The winsys this function is called from.
     */
    uint64_t (*query_timestamp)(struct radeon_winsys *ws);
+
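+   /**
+    * Query a winsys value, e.g. one of the memory-usage counters.
+    *
+    * \param ws    The winsys this function is called from.
+    * \param value The value to query.
+    */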
+   uint64_t (*query_value)(struct radeon_winsys *ws,
+                           enum radeon_value_id value);
};
#endif