   assert(bo->bo && "must not be called for slab entries");

-   mtx_lock(&bo->ws->global_bo_list_lock);
-   LIST_DEL(&bo->u.real.global_list_item);
-   bo->ws->num_buffers--;
-   mtx_unlock(&bo->ws->global_bo_list_lock);
+   if (bo->ws->debug_all_bos) {
+      mtx_lock(&bo->ws->global_bo_list_lock);
+      LIST_DEL(&bo->u.real.global_list_item);
+      bo->ws->num_buffers--;
+      mtx_unlock(&bo->ws->global_bo_list_lock);
+   }

   amdgpu_bo_va_op(bo->bo, 0, bo->base.size, bo->va, 0, AMDGPU_VA_OP_UNMAP);
   amdgpu_va_range_free(bo->u.real.va_handle);
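The hunk above tests a new ws->debug_all_bos flag whose declaration is not part of this excerpt. Judging by the accesses, the patch presumably also adds a boolean member to struct amdgpu_winsys; a sketch, not the actual hunk:

/* Sketch only -- the real declaration is not shown in this excerpt. */
#include <stdbool.h>

struct amdgpu_winsys {
   /* ... existing members elided ... */
   bool debug_all_bos; /* cached RADEON_ALL_BOS, set once in do_winsys_init() */
};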
   assert(bo->bo);

-   mtx_lock(&ws->global_bo_list_lock);
-   LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
-   ws->num_buffers++;
-   mtx_unlock(&ws->global_bo_list_lock);
+   if (ws->debug_all_bos) {
+      mtx_lock(&ws->global_bo_list_lock);
+      LIST_ADDTAIL(&bo->u.real.global_list_item, &ws->global_bo_list);
+      ws->num_buffers++;
+      mtx_unlock(&ws->global_bo_list_lock);
+   }
}
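Note that the add and remove paths test the same flag, and the flag is latched once at winsys creation (see the do_winsys_init() hunk below), so a buffer is unlinked from the global list exactly when it was linked; the guard can never flip between creation and destruction of a BO. The practical effect, presumably the point of the patch, is that when RADEON_ALL_BOS is off, buffer creation and destruction no longer take global_bo_list_lock at all.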
static struct amdgpu_winsys_bo *amdgpu_create_bo(struct amdgpu_winsys *ws,
   return cs->num_real_buffers;
}

-DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", false)
-
static void amdgpu_add_fence_dependency(struct amdgpu_cs *acs,
                                        struct amdgpu_cs_buffer *buffer)
{
   /* Create the buffer list.
    * Use a buffer list containing all allocated buffers if requested.
    */
-   if (debug_get_option_all_bos()) {
+   if (ws->debug_all_bos) {
      struct amdgpu_winsys_bo *bo;
      amdgpu_bo_handle *handles;
      unsigned num = 0;
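The hunk ends just as the debug branch opens. For orientation, the elided remainder of the branch presumably walks the global list under the lock and hands every allocated BO to the kernel via libdrm's amdgpu_bo_list_create(). The sketch below is illustrative only; the locals r and bo_list, the loop shape, and the error handling are assumptions, not part of the patch:

      /* Illustrative sketch of how the branch above continues. */
      handles = malloc(sizeof(handles[0]) * ws->num_buffers);
      /* error handling elided */
      mtx_lock(&ws->global_bo_list_lock);
      LIST_FOR_EACH_ENTRY(bo, &ws->global_bo_list, u.real.global_list_item)
         handles[num++] = bo->bo;
      r = amdgpu_bo_list_create(ws->dev, num, handles, NULL, &bo_list);
      mtx_unlock(&ws->global_bo_list_lock);
      free(handles);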
static struct util_hash_table *dev_tab = NULL;
static mtx_t dev_tab_mutex = _MTX_INITIALIZER_NP;
+DEBUG_GET_ONCE_BOOL_OPTION(all_bos, "RADEON_ALL_BOS", false)
+
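DEBUG_GET_ONCE_BOOL_OPTION is Mesa's helper from util/u_debug.h: it defines a static accessor, here debug_get_option_all_bos(), that reads the environment variable on first use and caches the result. Roughly expanded:

/* Rough expansion of the macro above (see util/u_debug.h). */
static boolean
debug_get_option_all_bos(void)
{
   static boolean first = TRUE;
   static boolean value;
   if (first) {
      first = FALSE;
      value = debug_get_bool_option("RADEON_ALL_BOS", false);
   }
   return value;
}

Moving the macro here keeps the env-var lookup in one place; do_winsys_init() below copies the result into the winsys so the hot paths only test a plain bool.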
/* Helper function to do the ioctls needed for setup and init. */
static bool do_winsys_init(struct amdgpu_winsys *ws, int fd)
{
   /* ... most of the init body is elided in this excerpt ... */
   }

   ws->check_vm = strstr(debug_get_option("R600_DEBUG", ""), "check_vm") != NULL;
+   ws->debug_all_bos = debug_get_option_all_bos();

   return true;
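Behavior for users is unchanged: running with RADEON_ALL_BOS=true (or 1) in the environment still makes every allocated buffer resident in each submission's buffer list. What changes is the bookkeeping cost when the option is off: the global list is no longer maintained at all, so buffer creation and destruction skip global_bo_list_lock, and the submit path tests the cached ws->debug_all_bos instead of calling into the debug-option helper.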