#include "util/list.h"
#include "util/macros.h"
#include "util/u_atomic.h"
+#include "util/u_debug.h"
+#include "util/vma.h"
#include "etnaviv_drmif.h"
-#include "etnaviv_drm.h"
+#include "drm-uapi/etnaviv_drm.h"
struct etna_bo_bucket {
uint32_t size;
struct etna_bo_cache bo_cache;
+ int use_softpin;
+ struct util_vma_heap address_space;
+
int closefd; /* call close(fd) upon destruction */
};
uint32_t *size, uint32_t flags);
int etna_bo_cache_free(struct etna_bo_cache *cache, struct etna_bo *bo);
-/* for where @table_lock is already held: */
+/* for where @etna_drm_table_lock is already held: */
void etna_device_del_locked(struct etna_device *dev);
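
The renamed comment makes the locking contract explicit: the _locked variants
run with etna_drm_table_lock already held. A hedged sketch of the expected
calling pattern, assuming the lock is the usual pthread mutex guarding the
handle tables:

   static void drop_device(struct etna_device *dev)
   {
      /* take the table lock before entering the _locked variant */
      pthread_mutex_lock(&etna_drm_table_lock);
      etna_device_del_locked(dev);
      pthread_mutex_unlock(&etna_drm_table_lock);
   }
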
/* a GEM buffer object allocated from the DRM device */
uint32_t flags;
uint32_t name; /* flink global handle (DRI2 name) */
uint64_t offset; /* offset to mmap() */
+ uint32_t va; /* GPU virtual address */
int refcnt;
- /* in the common case, a bo won't be referenced by more than a single
- * command stream. So to avoid looping over all the bo's in the
- * reloc table to find the idx of a bo that might already be in the
- * table, we cache the idx in the bo. But in order to detect the
- * slow-path where bo is ref'd in multiple streams, we also must track
- * the current_stream for which the idx is valid. See bo2idx().
+ /*
+ * To avoid excess hashtable lookups, cache the stream this bo was
+ * last emitted on (since that will probably also be the next stream
+ * it is emitted on).
*/
struct etna_cmd_stream *current_stream;
uint32_t idx;
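
The replacement comment condenses the old one, but the mechanism is unchanged.
A sketch of the fast path it describes, with bo2idx named after the helper the
old comment referenced and lookup_or_append standing in for the assumed
hash-table slow path:

   /* hypothetical: hash-table lookup that appends the bo when missing */
   static uint32_t lookup_or_append(struct etna_cmd_stream *stream,
                                    struct etna_bo *bo);

   static uint32_t bo2idx(struct etna_cmd_stream *stream, struct etna_bo *bo)
   {
      /* fast path: this stream was the last to emit the bo, so the
       * cached index is still valid */
      if (bo->current_stream == stream)
         return bo->idx;

      /* slow path: consult the stream's bo_table, then refresh the cache */
      uint32_t idx = lookup_or_append(stream, bo);
      bo->current_stream = stream;
      bo->idx = idx;
      return idx;
   }
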
uint32_t nr_bos, max_bos;
- /* notify callback if buffer reset happened */
+ /* notify callback invoked when the stream needs to be flushed */
- void (*reset_notify)(struct etna_cmd_stream *stream, void *priv);
- void *reset_notify_priv;
+ void (*force_flush)(struct etna_cmd_stream *stream, void *priv);
+ void *force_flush_priv;
+
+ void *bo_table; /* maps a bo to its index in the submit */
};
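
The reset_notify -> force_flush rename reverses the direction of the hook:
rather than being told a buffer reset happened after the fact, the stream's
owner is asked to flush when the stream can no longer grow. A sketch of the
assumed call site; the overflow check and the bos array are illustrative:

   static void append_bo(struct etna_cmd_stream *stream,
                         struct etna_cmd_stream_priv *priv,
                         struct etna_bo *bo)
   {
      /* let the owner flush before the bo table would overflow */
      if (priv->nr_bos == priv->max_bos && priv->force_flush)
         priv->force_flush(stream, priv->force_flush_priv);

      priv->bos[priv->nr_bos++] = bo; /* bos array assumed beside nr_bos */
   }
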
struct etna_perfmon {
#define ALIGN(v,a) (((v) + (a) - 1) & ~((a) - 1))
-#define enable_debug 1 /* TODO make dynamic */
+#define enable_debug 0 /* TODO make dynamic */
#define INFO_MSG(fmt, ...) \
- do { drmMsg("[I] "fmt " (%s:%d)\n", \
+ do { debug_printf("[I] "fmt " (%s:%d)\n", \
##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
#define DEBUG_MSG(fmt, ...) \
- do if (enable_debug) { drmMsg("[D] "fmt " (%s:%d)\n", \
+ do if (enable_debug) { debug_printf("[D] "fmt " (%s:%d)\n", \
##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
#define WARN_MSG(fmt, ...) \
- do { drmMsg("[W] "fmt " (%s:%d)\n", \
+ do { debug_printf("[W] "fmt " (%s:%d)\n", \
##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
#define ERROR_MSG(fmt, ...) \
- do { drmMsg("[E] " fmt " (%s:%d)\n", \
+ do { debug_printf("[E] " fmt " (%s:%d)\n", \
##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
#define VOID2U64(x) ((uint64_t)(unsigned long)(x))
static inline void get_abs_timeout(struct drm_etnaviv_timespec *tv, uint64_t ns)
{
struct timespec t;
- uint32_t s = ns / 1000000000;
clock_gettime(CLOCK_MONOTONIC, &t);
- tv->tv_sec = t.tv_sec + s;
- tv->tv_nsec = t.tv_nsec + ns - (s * 1000000000);
+ tv->tv_sec = t.tv_sec + ns / 1000000000;
+ tv->tv_nsec = t.tv_nsec + ns % 1000000000;
+}
+
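
The rewritten arithmetic is the same split as before, just expressed with /
and %: for example ns = 1500000000 adds 1 to tv_sec and 500000000 to tv_nsec.
As in the old code, tv_nsec can still end up above 999999999; if a normalized
timespec were ever required, a helper (not part of the patch) could carry the
overflow:

   static inline void normalize_timespec(struct drm_etnaviv_timespec *tv)
   {
      /* fold nanosecond overflow into whole seconds */
      tv->tv_sec += tv->tv_nsec / 1000000000;
      tv->tv_nsec %= 1000000000;
   }
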
+#if HAVE_VALGRIND
+# include <valgrind/memcheck.h>
+
+/*
+ * For tracking the backing memory (if valgrind enabled, we force a mmap
+ * for the purposes of tracking)
+ */
+static inline void VG_BO_ALLOC(struct etna_bo *bo)
+{
+ if (bo && RUNNING_ON_VALGRIND) {
+ VALGRIND_MALLOCLIKE_BLOCK(etna_bo_map(bo), bo->size, 0, 1);
+ }
+}
+
+static inline void VG_BO_FREE(struct etna_bo *bo)
+{
+ VALGRIND_FREELIKE_BLOCK(bo->map, 0);
+}
+
+/*
+ * For tracking bo structs that are in the buffer-cache, so that valgrind
+ * doesn't attribute ownership to the first one to allocate the recycled
+ * bo.
+ *
+ * Note that the list_head in etna_bo is used to track the buffers while
+ * they sit in the cache, so error reporting on that range is disabled in
+ * the meantime; otherwise valgrind would squawk about the list traversal.
+ */
+static inline void VG_BO_RELEASE(struct etna_bo *bo)
+{
+ if (RUNNING_ON_VALGRIND) {
+ VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, sizeof(*bo));
+ VALGRIND_MAKE_MEM_NOACCESS(bo, sizeof(*bo));
+ VALGRIND_FREELIKE_BLOCK(bo->map, 0);
+ }
+}
+
+static inline void VG_BO_OBTAIN(struct etna_bo *bo)
+{
+ if (RUNNING_ON_VALGRIND) {
+ VALGRIND_MAKE_MEM_DEFINED(bo, sizeof(*bo));
+ VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, sizeof(*bo));
+ VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, 1);
+ }
}
+#else
+static inline void VG_BO_ALLOC(struct etna_bo *bo) {}
+static inline void VG_BO_FREE(struct etna_bo *bo) {}
+static inline void VG_BO_RELEASE(struct etna_bo *bo) {}
+static inline void VG_BO_OBTAIN(struct etna_bo *bo) {}
+#endif
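
The four hooks are meant to bracket a bo's trips through the cache. A sketch
of the intended pairing, built on the cache entry points prototyped earlier;
the flag, the &dev->bo_cache ownership, and the exact call placement are
assumptions rather than lines from the patch:

   static void vg_tracking_example(struct etna_device *dev, uint32_t size)
   {
      struct etna_bo *bo = etna_bo_new(dev, size, DRM_ETNA_GEM_CACHE_WC);
      VG_BO_ALLOC(bo);   /* fresh bo: register its mapping with valgrind */

      etna_bo_cache_free(&dev->bo_cache, bo);
      VG_BO_RELEASE(bo); /* parked in the cache: struct and map go quiet */

      bo = etna_bo_cache_alloc(&dev->bo_cache, &size, DRM_ETNA_GEM_CACHE_WC);
      VG_BO_OBTAIN(bo);  /* recycled: make the struct defined again */
   }
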
#endif /* ETNAVIV_PRIV_H_ */