iris: include p_defines.h in iris_bufmgr.h
[mesa.git] src/gallium/drivers/iris/iris_bufmgr.h
/*
 * Copyright © 2017 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef IRIS_BUFMGR_H
#define IRIS_BUFMGR_H

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>
#include "util/macros.h"
#include "util/u_atomic.h"
#include "util/list.h"
#include "pipe/p_defines.h"

struct gen_device_info;
struct pipe_debug_callback;

enum iris_memory_zone {
   IRIS_MEMZONE_DYNAMIC,
   IRIS_MEMZONE_SURFACE,
   IRIS_MEMZONE_SHADER,
   IRIS_MEMZONE_OTHER,
};

#define IRIS_MEMZONE_COUNT (IRIS_MEMZONE_OTHER + 1)

struct iris_bo {
   /**
    * Size in bytes of the buffer object.
    *
    * The size may be larger than the size originally requested for the
    * allocation, such as being aligned to page size.
    */
   uint64_t size;

   /** Buffer manager context associated with this buffer object */
   struct iris_bufmgr *bufmgr;

   /** The GEM handle for this buffer object. */
   uint32_t gem_handle;

   /**
    * Virtual address of the buffer inside the PPGTT (Per-Process Graphics
    * Translation Table).
    *
    * Although each hardware context has its own VMA, we assign BO's to the
    * same address in all contexts, for simplicity.
    */
   uint64_t gtt_offset;

   /**
    * The validation list index for this buffer, or -1 when not in a batch.
    * Note that a single buffer may be in multiple batches (contexts), and
    * this is a global field, which refers to the last batch using the BO.
    * It should not be considered authoritative, but can be used to avoid a
    * linear walk of the validation list in the common case by guessing that
    * exec_bos[bo->index] == bo and confirming whether that's the case.
    */
   unsigned index;

   /**
    * Boolean of whether the GPU is definitely not accessing the buffer.
    *
    * This is only valid when reusable, since non-reusable
    * buffers are those that have been shared with other
    * processes, so we don't know their state.
    */
   bool idle;

   int refcount;
   const char *name;

   uint64_t kflags;

   /**
    * Kernel-assigned global name for this object
    *
    * List contains both flink-named and prime fd'd objects
    */
   unsigned global_name;

   /**
    * Current tiling mode
    */
   uint32_t tiling_mode;
   uint32_t swizzle_mode;
   uint32_t stride;

   time_t free_time;

   /** Mapped address for the buffer, saved across map/unmap cycles */
   void *map_cpu;
   /** GTT virtual address for the buffer, saved across map/unmap cycles */
   void *map_gtt;
   /** WC CPU address for the buffer, saved across map/unmap cycles */
   void *map_wc;

   /** BO cache list */
   struct list_head head;

   /**
    * Boolean of whether this buffer can be re-used
    */
   bool reusable;

   /**
    * Boolean of whether this buffer has been shared with an external client.
    */
   bool external;

   /**
    * Boolean of whether this buffer is cache coherent
    */
   bool cache_coherent;
};

#define BO_ALLOC_ZEROED (1<<0)

/**
 * Allocate a buffer object.
 *
 * Buffer objects are not necessarily initially mapped into CPU virtual
 * address space or graphics device aperture. They must be mapped
 * using iris_bo_map() to be used by the CPU.
 */
struct iris_bo *iris_bo_alloc(struct iris_bufmgr *bufmgr,
                              const char *name,
                              uint64_t size,
                              enum iris_memory_zone memzone);
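
/*
 * Illustrative sketch (not part of the original header): a typical
 * allocation in the dynamic state zone, uploaded with iris_bo_subdata()
 * and released with iris_bo_unreference(). The "bufmgr" handle and the
 * "data" payload are assumed to exist in the caller.
 *
 *    struct iris_bo *bo =
 *       iris_bo_alloc(bufmgr, "example state", 4096, IRIS_MEMZONE_DYNAMIC);
 *    if (bo) {
 *       iris_bo_subdata(bo, 0, sizeof(data), &data);
 *       iris_bo_unreference(bo);
 *    }
 */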

/**
 * Allocate a tiled buffer object.
 *
 * Alignment for tiled objects is set automatically; the 'flags'
 * argument provides a hint about how the object will be used initially.
 *
 * Valid tiling formats are:
 *   I915_TILING_NONE
 *   I915_TILING_X
 *   I915_TILING_Y
 */
struct iris_bo *iris_bo_alloc_tiled(struct iris_bufmgr *bufmgr,
                                    const char *name,
                                    uint64_t size,
                                    uint32_t tiling_mode,
                                    uint32_t pitch,
                                    unsigned flags,
                                    enum iris_memory_zone memzone);

/** Takes a reference on a buffer object */
static inline void
iris_bo_reference(struct iris_bo *bo)
{
   p_atomic_inc(&bo->refcount);
}

/**
 * Releases a reference on a buffer object, freeing the data if
 * no references remain.
 */
void iris_bo_unreference(struct iris_bo *bo);

#define MAP_READ          PIPE_TRANSFER_READ
#define MAP_WRITE         PIPE_TRANSFER_WRITE
#define MAP_ASYNC         PIPE_TRANSFER_UNSYNCHRONIZED
#define MAP_PERSISTENT    PIPE_TRANSFER_PERSISTENT
#define MAP_COHERENT      PIPE_TRANSFER_COHERENT
/* internal */
#define MAP_INTERNAL_MASK (0xff << 24)
#define MAP_RAW           (0x01 << 24)

/**
 * Maps the buffer into userspace.
 *
 * Unless MAP_ASYNC is set, this function will first block, waiting for
 * any existing execution on the buffer to complete. The resulting
 * mapping is returned.
 */
MUST_CHECK void *iris_bo_map(struct pipe_debug_callback *dbg,
                             struct iris_bo *bo, unsigned flags);
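
/*
 * Illustrative sketch (not part of the original header): writing to a
 * buffer through a CPU mapping. MAP_ASYNC skips the implicit wait when the
 * caller knows the GPU is not using the range. "bufmgr" and "dbg" (a
 * struct pipe_debug_callback *) are assumed to exist in the caller.
 *
 *    struct iris_bo *bo =
 *       iris_bo_alloc(bufmgr, "scratch", 4096, IRIS_MEMZONE_OTHER);
 *    void *map = iris_bo_map(dbg, bo, MAP_WRITE | MAP_ASYNC);
 *    if (map) {
 *       memset(map, 0, bo->size);
 *       iris_bo_unmap(bo);
 *    }
 *    iris_bo_unreference(bo);
 */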

/**
 * Unmaps the buffer.
 *
 * CPU mappings are retained for the lifetime of the buffer (see map_cpu,
 * map_gtt, and map_wc above), so this is currently a no-op.
 */
static inline int iris_bo_unmap(struct iris_bo *bo) { return 0; }

/** Write data into an object. */
int iris_bo_subdata(struct iris_bo *bo, uint64_t offset,
                    uint64_t size, const void *data);
/**
 * Waits for rendering to an object by the GPU to have completed.
 *
 * This is not required for any access to the BO by bo_map,
 * bo_subdata, etc. It is merely a way for the driver to implement
 * glFinish.
 */
void iris_bo_wait_rendering(struct iris_bo *bo);

/**
 * Tears down the buffer manager instance.
 */
void iris_bufmgr_destroy(struct iris_bufmgr *bufmgr);

/**
 * Get the current tiling (and resulting swizzling) mode for the bo.
 *
 * \param bo Buffer to get tiling mode for
 * \param tiling_mode returned tiling mode
 * \param swizzle_mode returned swizzling mode
 */
int iris_bo_get_tiling(struct iris_bo *bo, uint32_t *tiling_mode,
                       uint32_t *swizzle_mode);

/**
 * Create a visible name for a buffer which can be used by other apps
 *
 * \param bo Buffer to create a name for
 * \param name Returned name
 */
int iris_bo_flink(struct iris_bo *bo, uint32_t *name);

/**
 * Returns 1 if mapping the buffer for write could cause the process
 * to block, due to the object being active in the GPU.
 */
int iris_bo_busy(struct iris_bo *bo);

/**
 * Specify the volatility of the buffer.
 * \param bo Buffer whose purgeable status is being set
 * \param madv The purgeable status
 *
 * Use I915_MADV_DONTNEED to mark the buffer as purgeable, and it will be
 * reclaimed under memory pressure. If you subsequently require the buffer,
 * then you must pass I915_MADV_WILLNEED to mark the buffer as required.
 *
 * Returns 1 if the buffer was retained, or 0 if it was discarded whilst
 * marked as I915_MADV_DONTNEED.
 */
int iris_bo_madvise(struct iris_bo *bo, int madv);
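
/*
 * Illustrative sketch (not part of the original header): marking a cached
 * buffer as purgeable while it is idle, then reclaiming it later. If the
 * kernel discarded the pages in the meantime, the contents must be
 * regenerated before reuse.
 *
 *    iris_bo_madvise(bo, I915_MADV_DONTNEED);
 *    ...
 *    if (!iris_bo_madvise(bo, I915_MADV_WILLNEED)) {
 *       // Pages were discarded; re-upload or regenerate the contents.
 *    }
 */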

/* iris_bufmgr.c */
struct iris_bufmgr *iris_bufmgr_init(struct gen_device_info *devinfo, int fd);
struct iris_bo *iris_bo_gem_create_from_name(struct iris_bufmgr *bufmgr,
                                             const char *name,
                                             unsigned handle);
void iris_bufmgr_enable_reuse(struct iris_bufmgr *bufmgr);

int iris_bo_wait(struct iris_bo *bo, int64_t timeout_ns);
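
/*
 * Illustrative sketch (not part of the original header): polling versus
 * bounded waiting. iris_bo_busy() is a non-blocking query, while
 * iris_bo_wait() blocks for up to timeout_ns nanoseconds (a negative
 * timeout waits indefinitely, per the i915 GEM_WAIT ioctl).
 *
 *    if (iris_bo_busy(bo)) {
 *       if (iris_bo_wait(bo, 1000000) != 0) {
 *          // Still busy (or an error occurred) after ~1 ms.
 *       }
 *    }
 */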

uint32_t iris_create_hw_context(struct iris_bufmgr *bufmgr);

#define IRIS_CONTEXT_LOW_PRIORITY    ((I915_CONTEXT_MIN_USER_PRIORITY-1)/2)
#define IRIS_CONTEXT_MEDIUM_PRIORITY (I915_CONTEXT_DEFAULT_PRIORITY)
#define IRIS_CONTEXT_HIGH_PRIORITY   ((I915_CONTEXT_MAX_USER_PRIORITY+1)/2)

int iris_hw_context_set_priority(struct iris_bufmgr *bufmgr,
                                 uint32_t ctx_id, int priority);

void iris_destroy_hw_context(struct iris_bufmgr *bufmgr, uint32_t ctx_id);
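
/*
 * Illustrative sketch (not part of the original header): creating a
 * hardware context, lowering its scheduling priority, and destroying it
 * when done. "bufmgr" is assumed to exist in the caller.
 *
 *    uint32_t ctx_id = iris_create_hw_context(bufmgr);
 *    if (ctx_id) {
 *       iris_hw_context_set_priority(bufmgr, ctx_id,
 *                                    IRIS_CONTEXT_LOW_PRIORITY);
 *       ...
 *       iris_destroy_hw_context(bufmgr, ctx_id);
 *    }
 */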

int iris_bo_export_dmabuf(struct iris_bo *bo, int *prime_fd);
struct iris_bo *iris_bo_import_dmabuf(struct iris_bufmgr *bufmgr, int prime_fd);
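
/*
 * Illustrative sketch (not part of the original header): sharing a buffer
 * across processes or APIs via a dma-buf file descriptor. "bo" and
 * "other_bufmgr" are assumed to exist; the exported fd follows the usual
 * dma-buf ownership rules.
 *
 *    int fd = -1;
 *    if (iris_bo_export_dmabuf(bo, &fd) == 0) {
 *       // ...hand fd to the consumer (e.g. over a socket)...
 *       struct iris_bo *imported = iris_bo_import_dmabuf(other_bufmgr, fd);
 *       if (imported)
 *          iris_bo_unreference(imported);
 *    }
 */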

uint32_t iris_bo_export_gem_handle(struct iris_bo *bo);

int iris_reg_read(struct iris_bufmgr *bufmgr, uint32_t offset, uint64_t *out);

int drm_ioctl(int fd, unsigned long request, void *arg);

#endif /* IRIS_BUFMGR_H */