etnaviv: Add valgrind support
author	Marek Vasut <marex@denx.de>
Sat, 8 Jun 2019 22:18:29 +0000 (00:18 +0200)
committer	Lucas Stach <l.stach@pengutronix.de>
Wed, 14 Aug 2019 08:36:20 +0000 (10:36 +0200)
Add Valgrind support for etnaviv to track BO leaks.

Signed-off-by: Marek Vasut <marex@denx.de>
Reviewed-by: Christian Gmeiner <christian.gmeiner@gmail.com>
Reviewed-by: Lucas Stach <l.stach@pengutronix.de>
src/etnaviv/drm/etnaviv_bo.c
src/etnaviv/drm/etnaviv_bo_cache.c
src/etnaviv/drm/etnaviv_priv.h
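
The four VG_BO_* helpers added in etnaviv_priv.h below pair up across the bo
lifecycle: VG_BO_ALLOC when a bo is created or imported, VG_BO_RELEASE when it
is parked in the bo cache, VG_BO_OBTAIN when it is recycled out of the cache,
and VG_BO_FREE when it is finally destroyed. The following is a minimal sketch
of the memcheck client requests they build on, using placeholder names rather
than the etnaviv API; outside valgrind these requests compile to cheap no-ops.

#include <stddef.h>
#include <sys/mman.h>
#include <valgrind/memcheck.h>

struct fake_bo {            /* stand-in for struct etna_bo */
        void   *map;
        size_t  size;
};

static void fake_bo_alloc(struct fake_bo *bo, size_t size)
{
        bo->size = size;
        bo->map = mmap(NULL, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        /* Track the mapping like a heap block: 0 redzone bytes, zeroed. */
        VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, 1);
}

static void fake_bo_free(struct fake_bo *bo)
{
        /* Every MALLOCLIKE must be paired with a FREELIKE; an unpaired
         * block shows up in valgrind's leak report, i.e. a leaked bo. */
        VALGRIND_FREELIKE_BLOCK(bo->map, 0);
        munmap(bo->map, bo->size);
}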

index 6436fea416244b46a1195b339922eb302fb7c8ca..ccf5da7c8c7ae7c4978b9ba7b22ff7351e9f7fe2 100644 (file)
@@ -44,6 +44,8 @@ static void set_name(struct etna_bo *bo, uint32_t name)
 /* Called under etna_drm_table_lock */
 void _etna_bo_del(struct etna_bo *bo)
 {
+       VG_BO_FREE(bo);
+
        if (bo->map)
                os_munmap(bo->map, bo->size);
 
@@ -132,6 +134,8 @@ struct etna_bo *etna_bo_new(struct etna_device *dev, uint32_t size,
        bo->reuse = 1;
        pthread_mutex_unlock(&etna_drm_table_lock);
 
+       VG_BO_ALLOC(bo);
+
        return bo;
 }
 
@@ -188,8 +192,10 @@ struct etna_bo *etna_bo_from_name(struct etna_device *dev,
                goto out_unlock;
 
        bo = bo_from_handle(dev, req.size, req.handle, 0);
-       if (bo)
+       if (bo) {
                set_name(bo, name);
+               VG_BO_ALLOC(bo);
+       }
 
 out_unlock:
        pthread_mutex_unlock(&etna_drm_table_lock);
@@ -229,6 +235,8 @@ struct etna_bo *etna_bo_from_dmabuf(struct etna_device *dev, int fd)
 
        bo = bo_from_handle(dev, size, handle, 0);
 
+       VG_BO_ALLOC(bo);
+
 out_unlock:
        pthread_mutex_unlock(&etna_drm_table_lock);
 
index 918d00d4e1d413646407ec2896673dbf2dd752a1..3b28c1477aaccd61278f652b48e8daa96fb05fb3 100644 (file)
@@ -85,6 +85,7 @@ void etna_bo_cache_cleanup(struct etna_bo_cache *cache, time_t time)
                        if (time && ((time - bo->free_time) <= 1))
                                break;
 
+                       VG_BO_OBTAIN(bo);
                        list_del(&bo->list);
                        _etna_bo_del(bo);
                }
@@ -169,6 +170,7 @@ struct etna_bo *etna_bo_cache_alloc(struct etna_bo_cache *cache, uint32_t *size,
                *size = bucket->size;
                bo = find_in_bucket(bucket, flags);
                if (bo) {
+                       VG_BO_OBTAIN(bo);
                        p_atomic_set(&bo->refcnt, 1);
                        etna_device_ref(bo->dev);
                        return bo;
@@ -189,6 +191,7 @@ int etna_bo_cache_free(struct etna_bo_cache *cache, struct etna_bo *bo)
                clock_gettime(CLOCK_MONOTONIC, &time);
 
                bo->free_time = time.tv_sec;
+               VG_BO_RELEASE(bo);
                list_addtail(&bo->list, &bucket->list);
                etna_bo_cache_cleanup(cache, time.tv_sec);
 
index d8f053771e927ce0fcfd8ed087bcc2e1afef5a4d..0c770f705407c07ae5aa31396c0bf573b6502c1d 100644 (file)
@@ -205,4 +205,56 @@ static inline void get_abs_timeout(struct drm_etnaviv_timespec *tv, uint64_t ns)
        tv->tv_nsec = t.tv_nsec + ns - (s * 1000000000);
 }
 
+#if HAVE_VALGRIND
+#  include <valgrind/memcheck.h>
+
+/*
+ * For tracking the backing memory (if valgrind is enabled, we force a
+ * mmap for the purposes of tracking).
+ */
+static inline void VG_BO_ALLOC(struct etna_bo *bo)
+{
+       if (bo && RUNNING_ON_VALGRIND) {
+               VALGRIND_MALLOCLIKE_BLOCK(etna_bo_map(bo), bo->size, 0, 1);
+       }
+}
+
+static inline void VG_BO_FREE(struct etna_bo *bo)
+{
+       VALGRIND_FREELIKE_BLOCK(bo->map, 0);
+}
+
+/*
+ * For tracking bo structs that are in the buffer cache, so that valgrind
+ * doesn't attribute ownership to the first caller that allocates the
+ * recycled bo.
+ *
+ * Note that the list_head in etna_bo is used to track the buffers while
+ * they sit in the cache, so address error reporting is disabled on that
+ * range to keep valgrind from squawking about the list traversal.
+ *
+ */
+static inline void VG_BO_RELEASE(struct etna_bo *bo)
+{
+       if (RUNNING_ON_VALGRIND) {
+               VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, sizeof(*bo));
+               VALGRIND_MAKE_MEM_NOACCESS(bo, sizeof(*bo));
+               VALGRIND_FREELIKE_BLOCK(bo->map, 0);
+       }
+}
+static inline void VG_BO_OBTAIN(struct etna_bo *bo)
+{
+       if (RUNNING_ON_VALGRIND) {
+               VALGRIND_MAKE_MEM_DEFINED(bo, sizeof(*bo));
+               VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, sizeof(*bo));
+               VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, 1);
+       }
+}
+#else
+static inline void VG_BO_ALLOC(struct etna_bo *bo)   {}
+static inline void VG_BO_FREE(struct etna_bo *bo)    {}
+static inline void VG_BO_RELEASE(struct etna_bo *bo) {}
+static inline void VG_BO_OBTAIN(struct etna_bo *bo)  {}
+#endif
+
 #endif /* ETNAVIV_PRIV_H_ */
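
The VG_BO_RELEASE/VG_BO_OBTAIN pair above also toggles accessibility of the bo
struct itself: while a bo sits in the cache it is marked NOACCESS so that stale
users trip an error, and address error reporting is disabled on that range so
the cache's own traversal of the embedded list_head stays quiet. A stand-alone
sketch of that pairing, again with placeholder names rather than the etnaviv
API:

#include <valgrind/memcheck.h>

struct node { struct node *next; int payload; };

/* Called when the object is parked in a cache: poison it for users,
 * but keep the cache's own list walk from triggering address errors. */
static void cache_park(struct node *n)
{
        VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(n, sizeof(*n));
        VALGRIND_MAKE_MEM_NOACCESS(n, sizeof(*n));
}

/* Called when the object is handed back out: make it addressable and
 * defined again so normal checking resumes. */
static void cache_take(struct node *n)
{
        VALGRIND_MAKE_MEM_DEFINED(n, sizeof(*n));
        VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(n, sizeof(*n));
}

The RUNNING_ON_VALGRIND checks in the patch keep these requests (and the forced
etna_bo_map() in VG_BO_ALLOC) out of the way when not running under valgrind,
and the whole block compiles to empty stubs unless HAVE_VALGRIND is defined.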