/*
 * Copyright (C) 2014-2015 Etnaviv Project
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Christian Gmeiner <christian.gmeiner@gmail.com>
 */
#ifndef ETNAVIV_PRIV_H_
#define ETNAVIV_PRIV_H_

#include <stdint.h>
#include <time.h>
#include <sys/ioctl.h>

#include "util/list.h"
#include "util/macros.h"
#include "util/timespec.h"
#include "util/u_atomic.h"
#include "util/u_debug.h"
#include "util/vma.h"

#include "etnaviv_drmif.h"
#include "drm-uapi/etnaviv_drm.h"
53 struct etna_bo_bucket
{
55 struct list_head list
;
58 struct etna_bo_cache
{
59 struct etna_bo_bucket cache_bucket
[14 * 4];
68 /* tables to keep track of bo's, to avoid "evil-twin" etna_bo objects:
70 * handle_table: maps handle to etna_bo
71 * name_table: maps flink name to etna_bo
73 * We end up needing two tables, because DRM_IOCTL_GEM_OPEN always
74 * returns a new handle. So we need to figure out if the bo is already
75 * open in the process first, before calling gem-open.
77 void *handle_table
, *name_table
;
79 struct etna_bo_cache bo_cache
;
82 struct util_vma_heap address_space
;
84 int closefd
; /* call close(fd) upon destruction */
/* BO-cache management: */
void etna_bo_cache_init(struct etna_bo_cache *cache);
void etna_bo_cache_cleanup(struct etna_bo_cache *cache, time_t time);
struct etna_bo *etna_bo_cache_alloc(struct etna_bo_cache *cache,
		uint32_t *size, uint32_t flags);
int etna_bo_cache_free(struct etna_bo_cache *cache, struct etna_bo *bo);

/* for where @etna_drm_table_lock is already held: */
void etna_device_del_locked(struct etna_device *dev);
96 /* a GEM buffer object allocated from the DRM device */
98 struct etna_device
*dev
;
99 void *map
; /* userspace mmap'ing (if there is one) */
103 uint32_t name
; /* flink global handle (DRI2 name) */
104 uint64_t offset
; /* offset to mmap() */
105 uint32_t va
; /* GPU virtual address */
109 * To avoid excess hashtable lookups, cache the stream this bo was
110 * last emitted on (since that will probably also be the next ring
113 struct etna_cmd_stream
*current_stream
;
117 struct list_head list
; /* bucket-list entry */
118 time_t free_time
; /* time when added to bucket-list */
struct etna_gpu {
	struct etna_device *dev;
	/* NOTE(review): core identification members restored from upstream
	 * (only 'dev' survived in this copy) — confirm */
	uint32_t core;
	uint32_t model;
	uint32_t revision;
};
129 enum etna_pipe_id id
;
130 struct etna_gpu
*gpu
;
133 struct etna_cmd_stream_priv
{
134 struct etna_cmd_stream base
;
135 struct etna_pipe
*pipe
;
137 uint32_t last_timestamp
;
139 /* submit ioctl related tables: */
142 struct drm_etnaviv_gem_submit_bo
*bos
;
143 uint32_t nr_bos
, max_bos
;
146 struct drm_etnaviv_gem_submit_reloc
*relocs
;
147 uint32_t nr_relocs
, max_relocs
;
150 struct drm_etnaviv_gem_submit_pmr
*pmrs
;
151 uint32_t nr_pmrs
, max_pmrs
;
154 /* should have matching entries in submit.bos: */
155 struct etna_bo
**bos
;
156 uint32_t nr_bos
, max_bos
;
158 /* notify callback if buffer reset happened */
159 void (*force_flush
)(struct etna_cmd_stream
*stream
, void *priv
);
160 void *force_flush_priv
;
165 struct etna_perfmon
{
166 struct list_head domains
;
167 struct etna_pipe
*pipe
;
170 struct etna_perfmon_domain
172 struct list_head head
;
173 struct list_head signals
;
178 struct etna_perfmon_signal
180 struct list_head head
;
181 struct etna_perfmon_domain
*domain
;
/* round 'v' up to the next multiple of 'a' ('a' must be a power of two) */
#define ALIGN(v,a) (((v) + (a) - 1) & ~((a) - 1))

#define enable_debug 0  /* TODO make dynamic */

/* Logging helpers: each prints a severity tag, the message, and the
 * originating function/line via debug_printf(). */
#define INFO_MSG(fmt, ...) \
		do { debug_printf("[I] "fmt " (%s:%d)\n", \
				##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
#define DEBUG_MSG(fmt, ...) \
		do if (enable_debug) { debug_printf("[D] "fmt " (%s:%d)\n", \
				##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
#define WARN_MSG(fmt, ...) \
		do { debug_printf("[W] "fmt " (%s:%d)\n", \
				##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
#define ERROR_MSG(fmt, ...) \
		do { debug_printf("[E] " fmt " (%s:%d)\n", \
				##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)

/* widen a pointer to u64 for the kernel ABI (via unsigned long to avoid
 * sign-extension surprises on 32-bit) */
#define VOID2U64(x) ((uint64_t)(unsigned long)(x))
205 static inline void get_abs_timeout(struct drm_etnaviv_timespec
*tv
, uint64_t ns
)
208 clock_gettime(CLOCK_MONOTONIC
, &t
);
209 tv
->tv_sec
= t
.tv_sec
+ ns
/ NSEC_PER_SEC
;
210 tv
->tv_nsec
= t
.tv_nsec
+ ns
% NSEC_PER_SEC
;
211 if (tv
->tv_nsec
>= NSEC_PER_SEC
) {
212 tv
->tv_nsec
-= NSEC_PER_SEC
;
218 # include <valgrind/memcheck.h>
221 * For tracking the backing memory (if valgrind enabled, we force a mmap
222 * for the purposes of tracking)
224 static inline void VG_BO_ALLOC(struct etna_bo
*bo
)
226 if (bo
&& RUNNING_ON_VALGRIND
) {
227 VALGRIND_MALLOCLIKE_BLOCK(etna_bo_map(bo
), bo
->size
, 0, 1);
231 static inline void VG_BO_FREE(struct etna_bo
*bo
)
233 VALGRIND_FREELIKE_BLOCK(bo
->map
, 0);
237 * For tracking bo structs that are in the buffer-cache, so that valgrind
238 * doesn't attribute ownership to the first one to allocate the recycled
241 * Note that the list_head in etna_bo is used to track the buffers in cache
242 * so disable error reporting on the range while they are in cache so
243 * valgrind doesn't squawk about list traversal.
246 static inline void VG_BO_RELEASE(struct etna_bo
*bo
)
248 if (RUNNING_ON_VALGRIND
) {
249 VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo
, sizeof(*bo
));
250 VALGRIND_MAKE_MEM_NOACCESS(bo
, sizeof(*bo
));
251 VALGRIND_FREELIKE_BLOCK(bo
->map
, 0);
254 static inline void VG_BO_OBTAIN(struct etna_bo
*bo
)
256 if (RUNNING_ON_VALGRIND
) {
257 VALGRIND_MAKE_MEM_DEFINED(bo
, sizeof(*bo
));
258 VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo
, sizeof(*bo
));
259 VALGRIND_MALLOCLIKE_BLOCK(bo
->map
, bo
->size
, 0, 1);
263 static inline void VG_BO_ALLOC(struct etna_bo
*bo
) {}
264 static inline void VG_BO_FREE(struct etna_bo
*bo
) {}
265 static inline void VG_BO_RELEASE(struct etna_bo
*bo
) {}
266 static inline void VG_BO_OBTAIN(struct etna_bo
*bo
) {}
269 #endif /* ETNAVIV_PRIV_H_ */