iris: bufmgr updates.
[mesa.git] src/gallium/drivers/iris/iris_bufmgr.h
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef IRIS_BUFMGR_H
#define IRIS_BUFMGR_H

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include "util/macros.h"
#include "util/u_atomic.h"
#include "util/list.h"
#include "pipe/p_defines.h"     /* for the PIPE_TRANSFER_* flags used below */
#include "drm-uapi/i915_drm.h"  /* for I915_TILING_*, I915_MADV_*, and the
                                 * I915_CONTEXT_*_PRIORITY values */

struct gen_device_info;
struct pipe_debug_callback;

struct iris_bo {
   /**
    * Size in bytes of the buffer object.
    *
    * The size may be larger than the size originally requested for the
    * allocation, such as being aligned to page size.
    */
   uint64_t size;

   /** Buffer manager context associated with this buffer object */
   struct iris_bufmgr *bufmgr;

   /** The GEM handle for this buffer object. */
   uint32_t gem_handle;

   /**
    * Offset of the buffer inside the Graphics Translation Table.
    *
    * This is effectively our GPU address for the buffer, and we use it
    * as the base for all state pointers into the buffer. However, since
    * the kernel may be forced to move it around during the course of the
    * buffer's lifetime, we can only know where the buffer was on the last
    * execbuf. We presume, and are usually right, that the buffer will not
    * move, so we use that last offset for the next batch; by doing so we
    * can avoid having the kernel perform a relocation fixup pass, as the
    * pointers inside the batch will already use the correct base offset.
    *
    * Since we do use it as a base address for the next batch of pointers,
    * the kernel treats our offset as a request, and if possible will
    * arrange for the buffer to be placed at that address (trying to
    * balance the cost of buffer migration versus the cost of performing
    * relocations). Furthermore, by specifying EXEC_OBJECT_PINNED we can
    * force the kernel to place the buffer at our chosen offset, or to
    * report a failure if that offset conflicts with another object.
    *
    * Note the GTT may be either per context, or shared globally across
    * the system. On a shared system, our buffers have to contend for
    * address space with both aperture mappings and framebuffers, and so
    * are more likely to be moved. On a full ppGTT system, each batch
    * exists in its own GTT, and so each buffer may have its own offset
    * within each context.
    */
   uint64_t gtt_offset;

   /**
    * The validation list index for this buffer, or -1 when not in a batch.
    * Note that a single buffer may be in multiple batches (contexts), and
    * this is a global field, which refers to the last batch using the BO.
    * It should not be considered authoritative, but can be used to avoid a
    * linear walk of the validation list in the common case by guessing
    * that exec_bos[bo->index] == bo and confirming whether that's the case
    * (see the lookup sketch after this struct).
    */
   unsigned index;

   /**
    * Boolean of whether the GPU is definitely not accessing the buffer.
    *
    * This is only valid when reusable, since non-reusable
    * buffers are those that have been shared with other
    * processes, so we don't know their state.
    */
   bool idle;

   int refcount;
   const char *name;

   uint64_t kflags;

   /**
    * Kernel-assigned global name for this object
    *
    * The list contains both flink-named and prime-fd'd objects.
    */
   unsigned global_name;

   /**
    * Current tiling mode
    */
   uint32_t tiling_mode;
   uint32_t swizzle_mode;
   uint32_t stride;

   time_t free_time;

   /** Mapped address for the buffer, saved across map/unmap cycles */
   void *map_cpu;
   /** GTT virtual address for the buffer, saved across map/unmap cycles */
   void *map_gtt;
   /** WC CPU address for the buffer, saved across map/unmap cycles */
   void *map_wc;

   /** BO cache list */
   struct list_head head;

   /**
    * Boolean of whether this buffer can be re-used
    */
   bool reusable;

   /**
    * Boolean of whether this buffer has been shared with an external client.
    */
   bool external;

   /**
    * Boolean of whether this buffer is cache coherent
    */
   bool cache_coherent;
};

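/*
 * Illustrative sketch (not part of the driver API): using the `index`
 * guess above to find a BO in a batch's validation list without a linear
 * walk.  `exec_bos` and `exec_count` are hypothetical names standing in
 * for a batch's validation list.
 */
static inline int
iris_example_find_validation_index(struct iris_bo **exec_bos, int exec_count,
                                   struct iris_bo *bo)
{
   /* Trust the cached guess only after confirming it, since `index` is a
    * global field that may refer to a different batch.
    */
   if (bo->index < (unsigned) exec_count && exec_bos[bo->index] == bo)
      return bo->index;

   /* Fall back to the linear walk the guess normally avoids. */
   for (int i = 0; i < exec_count; i++) {
      if (exec_bos[i] == bo)
         return i;
   }

   return -1; /* not in this batch's validation list */
}
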
#define BO_ALLOC_ZEROED (1<<0)

/**
 * Allocate a buffer object.
 *
 * Buffer objects are not necessarily initially mapped into CPU virtual
 * address space or graphics device aperture. They must be mapped
 * using iris_bo_map() to be used by the CPU.
 */
struct iris_bo *iris_bo_alloc(struct iris_bufmgr *bufmgr,
                              const char *name,
                              uint64_t size);
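
/*
 * Illustrative sketch: a typical allocation.  The name is only a debug
 * label, and the actual size may be rounded up as described above.  The
 * caller owns the returned reference and eventually drops it with
 * iris_bo_unreference().
 */
static inline struct iris_bo *
iris_example_alloc_scratch(struct iris_bufmgr *bufmgr)
{
   return iris_bo_alloc(bufmgr, "example scratch", 4096);
}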

/**
 * Allocate a tiled buffer object.
 *
 * Alignment for tiled objects is set automatically; the 'flags'
 * argument provides a hint about how the object will be used initially.
 *
 * Valid tiling formats are:
 *  I915_TILING_NONE
 *  I915_TILING_X
 *  I915_TILING_Y
 */
struct iris_bo *iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr,
                                    const char *name,
                                    uint64_t size,
                                    uint32_t tiling_mode,
                                    uint32_t pitch,
                                    unsigned flags);
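
/*
 * Illustrative sketch: allocating an X-tiled surface.  The pitch and row
 * count here are placeholders; I915_TILING_X comes from i915_drm.h.
 */
static inline struct iris_bo *
iris_example_alloc_xtiled(struct iris_bufmgr *bufmgr)
{
   const uint32_t pitch = 2048;                  /* bytes per row (placeholder) */
   const uint64_t size = (uint64_t) pitch * 512; /* 512 rows (placeholder) */
   return iris_bo_alloc_tiled(bufmgr, "example tiled surface", size,
                              I915_TILING_X, pitch, 0);
}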

/** Takes a reference on a buffer object */
static inline void
iris_bo_reference(struct iris_bo *bo)
{
   p_atomic_inc(&bo->refcount);
}

/**
 * Releases a reference on a buffer object, freeing the data if
 * no references remain.
 */
void iris_bo_unreference(struct iris_bo *bo);

#define MAP_READ          PIPE_TRANSFER_READ
#define MAP_WRITE         PIPE_TRANSFER_WRITE
#define MAP_ASYNC         PIPE_TRANSFER_UNSYNCHRONIZED
#define MAP_PERSISTENT    PIPE_TRANSFER_PERSISTENT
#define MAP_COHERENT      PIPE_TRANSFER_COHERENT
/* internal */
#define MAP_INTERNAL_MASK (0xff << 24)
#define MAP_RAW           (0x01 << 24)

/**
 * Maps the buffer into userspace.
 *
 * This function will block waiting for any existing execution on the
 * buffer to complete first, unless MAP_ASYNC is set. The resulting
 * mapping is returned.
 */
MUST_CHECK void *iris_bo_map(struct pipe_debug_callback *dbg,
                             struct iris_bo *bo, unsigned flags);

/**
 * Unmaps the buffer. This is currently a no-op: mappings are cached on
 * the BO (see map_cpu/map_gtt/map_wc) and persist until it is freed.
 */
static inline int iris_bo_unmap(struct iris_bo *bo) { return 0; }

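/*
 * Illustrative sketch: filling a BO through a synchronized CPU map,
 * assuming a NULL debug callback is accepted.  MAP_WRITE alone waits for
 * any GPU access to finish; adding MAP_ASYNC would skip that wait, which
 * is only safe when the caller knows the BO is idle.
 */
static inline bool
iris_example_fill_u32(struct iris_bo *bo, uint32_t value)
{
   uint32_t *map = iris_bo_map(NULL, bo, MAP_WRITE);
   if (!map)
      return false;

   for (uint64_t i = 0; i < bo->size / sizeof(uint32_t); i++)
      map[i] = value;

   iris_bo_unmap(bo);
   return true;
}
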
/** Write data into an object. */
int iris_bo_subdata(struct iris_bo *bo, uint64_t offset,
                    uint64_t size, const void *data);
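
/*
 * Illustrative sketch: uploading a small constant block via subdata,
 * which avoids an explicit map/unmap at the call site.
 */
static inline int
iris_example_upload_constants(struct iris_bo *bo,
                              const float *consts, unsigned count)
{
   /* Write `count` floats at the start of the BO. */
   return iris_bo_subdata(bo, 0, (uint64_t) count * sizeof(*consts), consts);
}
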
/**
 * Waits for any GPU rendering to the object to complete.
 *
 * This is not required for any access to the BO by bo_map,
 * bo_subdata, etc. It is merely a way for the driver to implement
 * glFinish.
 */
void iris_bo_wait_rendering(struct iris_bo *bo);

/**
 * Tears down the buffer manager instance.
 */
void iris_bufmgr_destroy(struct iris_bufmgr *bufmgr);

/**
 * Get the current tiling (and resulting swizzling) mode for the bo.
 *
 * \param bo Buffer to get tiling mode for
 * \param tiling_mode returned tiling mode
 * \param swizzle_mode returned swizzling mode
 */
int iris_bo_get_tiling(struct iris_bo *bo, uint32_t *tiling_mode,
                       uint32_t *swizzle_mode);

/**
 * Create a visible name for a buffer which can be used by other apps.
 *
 * \param bo Buffer to create a name for
 * \param name Returned name
 */
int iris_bo_flink(struct iris_bo *bo, uint32_t *name);

/**
 * Returns 1 if mapping the buffer for write could cause the process
 * to block, due to the object being active on the GPU.
 */
int iris_bo_busy(struct iris_bo *bo);
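
/*
 * Illustrative sketch: using busy-ness to avoid a stall.  If the BO is
 * idle, an unsynchronized map cannot block; otherwise the caller might
 * prefer to allocate fresh storage instead of waiting.
 */
static inline void *
iris_example_map_if_idle(struct pipe_debug_callback *dbg, struct iris_bo *bo)
{
   if (iris_bo_busy(bo))
      return NULL; /* caller could allocate a new BO rather than stall */

   return iris_bo_map(dbg, bo, MAP_WRITE | MAP_ASYNC);
}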

/**
 * Specify the volatility of the buffer.
 * \param bo Buffer to advise on
 * \param madv The purgeable status
 *
 * Use I915_MADV_DONTNEED to mark the buffer as purgeable, and it will be
 * reclaimed under memory pressure. If you subsequently require the buffer,
 * then you must pass I915_MADV_WILLNEED to mark the buffer as required.
 *
 * Returns 1 if the buffer was retained, or 0 if it was discarded whilst
 * marked as I915_MADV_DONTNEED.
 */
int iris_bo_madvise(struct iris_bo *bo, int madv);
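
/*
 * Illustrative sketch of the purgeable-cache pattern described above:
 * before reusing a BO that was marked I915_MADV_DONTNEED, revalidate it
 * with I915_MADV_WILLNEED and discard it if the kernel purged the pages.
 */
static inline struct iris_bo *
iris_example_revive_cached_bo(struct iris_bo *bo)
{
   if (iris_bo_madvise(bo, I915_MADV_WILLNEED))
      return bo; /* pages were retained; contents are still valid */

   iris_bo_unreference(bo); /* purged: drop it and allocate a fresh BO */
   return NULL;
}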

/* iris_bufmgr.c */
struct iris_bufmgr *iris_bufmgr_init(struct gen_device_info *devinfo, int fd);
struct iris_bo *iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
                                             const char *name,
                                             unsigned handle);
void iris_bufmgr_enable_reuse(struct iris_bufmgr *bufmgr);
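
/*
 * Illustrative sketch: bringing up a buffer manager on an already-opened
 * DRM fd and enabling the BO cache.  In the real driver, `devinfo` and
 * `fd` come from screen creation.
 */
static inline struct iris_bufmgr *
iris_example_bufmgr_create(struct gen_device_info *devinfo, int fd)
{
   struct iris_bufmgr *bufmgr = iris_bufmgr_init(devinfo, fd);
   if (bufmgr)
      iris_bufmgr_enable_reuse(bufmgr);
   return bufmgr;
}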

/**
 * Waits for the buffer to become idle, up to timeout_ns nanoseconds
 * (a negative timeout waits indefinitely). Returns 0 on success.
 */
int iris_bo_wait(struct iris_bo *bo, int64_t timeout_ns);

uint32_t iris_create_hw_context(struct iris_bufmgr *bufmgr);

#define IRIS_CONTEXT_LOW_PRIORITY    ((I915_CONTEXT_MIN_USER_PRIORITY-1)/2)
#define IRIS_CONTEXT_MEDIUM_PRIORITY (I915_CONTEXT_DEFAULT_PRIORITY)
#define IRIS_CONTEXT_HIGH_PRIORITY   ((I915_CONTEXT_MAX_USER_PRIORITY+1)/2)

int iris_hw_context_set_priority(struct iris_bufmgr *bufmgr,
                                 uint32_t ctx_id, int priority);
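
/*
 * Illustrative sketch: creating a hardware context and requesting high
 * priority.  This assumes a return of 0 indicates failure; raising
 * priority may require elevated privileges, so the error is ignored
 * here and the context keeps the default priority.
 */
static inline uint32_t
iris_example_create_high_priority_context(struct iris_bufmgr *bufmgr)
{
   uint32_t ctx_id = iris_create_hw_context(bufmgr);
   if (ctx_id != 0)
      (void) iris_hw_context_set_priority(bufmgr, ctx_id,
                                          IRIS_CONTEXT_HIGH_PRIORITY);
   return ctx_id;
}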

void iris_destroy_hw_context(struct iris_bufmgr *bufmgr, uint32_t ctx_id);

int iris_bo_export_dmabuf(struct iris_bo *bo, int *prime_fd);
struct iris_bo *iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd);
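
/*
 * Illustrative sketch: round-tripping a BO through dma-buf.  In practice
 * the exported fd would be sent to another process before being
 * imported; the caller still owns prime_fd and should close it when done.
 */
static inline struct iris_bo *
iris_example_dmabuf_roundtrip(struct iris_bufmgr *bufmgr, struct iris_bo *bo)
{
   int prime_fd = -1;
   if (iris_bo_export_dmabuf(bo, &prime_fd) != 0)
      return NULL;

   /* Normally the fd crosses a process boundary here. */
   return iris_bo_import_dmabuf(bufmgr, prime_fd);
}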

uint32_t iris_bo_export_gem_handle(struct iris_bo *bo);

int iris_reg_read(struct iris_bufmgr *bufmgr, uint32_t offset, uint64_t *out);

int drm_ioctl(int fd, unsigned long request, void *arg);

#endif /* IRIS_BUFMGR_H */