i965: Drop EXEC_OBJECT_CAPTURE defines.
mesa.git: src/mesa/drivers/dri/i965/brw_bufmgr.h
/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

/**
 * @file brw_bufmgr.h
 *
 * Public definitions of Intel-specific bufmgr functions.
 */

#ifndef INTEL_BUFMGR_H
#define INTEL_BUFMGR_H

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include "util/u_atomic.h"
#include "util/list.h"

#if defined(__cplusplus)
extern "C" {
#endif

struct gen_device_info;
struct brw_context;

struct brw_bo {
   /**
    * Size in bytes of the buffer object.
    *
    * The size may be larger than the size originally requested for the
    * allocation, such as being aligned to page size.
    */
   uint64_t size;

   /**
    * Alignment requirement for object
    *
    * Used for GTT mapping & pinning the object.
    */
   uint64_t align;

   /** Buffer manager context associated with this buffer object */
   struct brw_bufmgr *bufmgr;

   /** The GEM handle for this buffer object. */
   uint32_t gem_handle;

   /**
    * Offset of the buffer inside the Graphics Translation Table.
    *
    * This is effectively our GPU address for the buffer, and we use it
    * as the base for all state pointers into the buffer. However, since
    * the kernel may be forced to move it around during the course of the
    * buffer's lifetime, we can only know where the buffer was on the last
    * execbuf. We presume, and are usually right, that the buffer will not
    * move, and so we use that last offset for the next batch; by doing so
    * we can avoid having the kernel perform a relocation fixup pass, as
    * our pointers inside the batch will be using the correct base offset.
    *
    * Since we do use it as a base address for the next batch of pointers,
    * the kernel treats our offset as a request, and if possible will
    * arrange for the buffer to be placed at that address (trying to
    * balance the cost of buffer migration versus the cost of performing
    * relocations). Furthermore, by specifying EXEC_OBJECT_PINNED we can
    * force the kernel to place the buffer at our chosen offset, or to
    * report a failure if that offset conflicts with another object.
    *
    * Note the GTT may be either per context, or shared globally across the
    * system. On a shared system, our buffers have to contend for address
    * space with both aperture mappings and framebuffers, and so are more
    * likely to be moved. On a full ppGTT system, each batch exists in its
    * own GTT, and so each buffer may have its own offset within each
    * context.
    */
   uint64_t gtt_offset;

   /**
    * The validation list index for this buffer, or -1 when not in a batch.
    * Note that a single buffer may be in multiple batches (contexts), and
    * this is a global field, which refers to the last batch using the BO.
    * It should not be considered authoritative, but can be used to avoid a
    * linear walk of the validation list in the common case by guessing that
    * exec_bos[bo->index] == bo and confirming whether that's the case.
    */
   unsigned index;

   /**
    * Boolean of whether the GPU is definitely not accessing the buffer.
    *
    * This is only valid when reusable, since non-reusable
    * buffers are those that have been shared with other
    * processes, so we don't know their state.
    */
   bool idle;

   int refcount;
   const char *name;

   uint64_t kflags;

   /**
    * Kernel-assigned global name for this object
    *
    * The list contains both flink-named and prime-fd'd objects.
    */
   unsigned int global_name;

   /**
    * Current tiling mode
    */
   uint32_t tiling_mode;
   uint32_t swizzle_mode;
   uint32_t stride;

   time_t free_time;

   /** Mapped address for the buffer, saved across map/unmap cycles */
   void *map_cpu;
   /** GTT virtual address for the buffer, saved across map/unmap cycles */
   void *map_gtt;
   /** WC CPU address for the buffer, saved across map/unmap cycles */
   void *map_wc;

   /** BO cache list */
   struct list_head head;

   /**
    * Boolean of whether this buffer can be re-used
    */
   bool reusable;

   /**
    * Boolean of whether this buffer has been shared with an external client.
    */
   bool external;

   /**
    * Boolean of whether this buffer is cache coherent
    */
   bool cache_coherent;
};

#define BO_ALLOC_BUSY   (1<<0)
#define BO_ALLOC_ZEROED (1<<1)

/**
 * Allocate a buffer object.
 *
 * Buffer objects are not necessarily initially mapped into CPU virtual
 * address space or graphics device aperture. They must be mapped
 * using brw_bo_map() to be used by the CPU.
 */
struct brw_bo *brw_bo_alloc(struct brw_bufmgr *bufmgr, const char *name,
                            uint64_t size, uint64_t alignment);

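/*
 * A minimal allocation-lifecycle sketch, assuming a valid 'bufmgr' from
 * brw_bufmgr_init(); 'data' stands in for a hypothetical CPU-side array
 * and error handling is elided:
 *
 *    struct brw_bo *bo = brw_bo_alloc(bufmgr, "scratch", 4096, 0);
 *    if (bo != NULL) {
 *       brw_bo_subdata(bo, 0, sizeof(data), data); // upload some bytes
 *       // ... reference the BO in batches ...
 *       brw_bo_unreference(bo);                    // may free or cache it
 *    }
 */
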
/**
 * Allocate a tiled buffer object.
 *
 * Alignment for tiled objects is set automatically; the 'flags'
 * argument provides a hint about how the object will be used initially.
 *
 * Valid tiling formats are:
 *   I915_TILING_NONE
 *   I915_TILING_X
 *   I915_TILING_Y
 */
struct brw_bo *brw_bo_alloc_tiled(struct brw_bufmgr *bufmgr,
                                  const char *name,
                                  uint64_t size,
                                  uint32_t tiling_mode,
                                  uint32_t pitch,
                                  unsigned flags);

/**
 * Allocate a tiled buffer object.
 *
 * Alignment for tiled objects is set automatically; the 'flags'
 * argument provides a hint about how the object will be used initially.
 *
 * Valid tiling formats are:
 *   I915_TILING_NONE
 *   I915_TILING_X
 *   I915_TILING_Y
 *
 * Note the tiling format may be rejected; callers should check the
 * 'tiling_mode' field on return, as well as the pitch value, which
 * may have been rounded up to accommodate tiling restrictions.
 */
struct brw_bo *brw_bo_alloc_tiled_2d(struct brw_bufmgr *bufmgr,
                                     const char *name,
                                     int x, int y, int cpp,
                                     uint32_t tiling_mode,
                                     uint32_t *pitch,
                                     unsigned flags);

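/*
 * A sketch of checking the returned tiling and pitch, since either may
 * differ from the request; the names here ('bufmgr', the 128x128 RGBA
 * dimensions) are illustrative only:
 *
 *    uint32_t pitch = 0;
 *    struct brw_bo *bo =
 *       brw_bo_alloc_tiled_2d(bufmgr, "miptree", 128, 128, 4,
 *                             I915_TILING_Y, &pitch, 0);
 *    if (bo && bo->tiling_mode != I915_TILING_Y) {
 *       // the requested tiling was rejected; treat the BO accordingly
 *    }
 *    // 'pitch' now holds the actual row stride in bytes
 */
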
/** Takes a reference on a buffer object */
static inline void
brw_bo_reference(struct brw_bo *bo)
{
   p_atomic_inc(&bo->refcount);
}

/**
 * Releases a reference on a buffer object, freeing the data if
 * no references remain.
 */
void brw_bo_unreference(struct brw_bo *bo);

/* Must match MapBufferRange interface (for convenience) */
#define MAP_READ       GL_MAP_READ_BIT
#define MAP_WRITE      GL_MAP_WRITE_BIT
#define MAP_ASYNC      GL_MAP_UNSYNCHRONIZED_BIT
#define MAP_PERSISTENT GL_MAP_PERSISTENT_BIT
#define MAP_COHERENT   GL_MAP_COHERENT_BIT
/* internal */
#define MAP_INTERNAL_MASK (0xff << 24)
#define MAP_RAW           (0x01 << 24)

/**
 * Maps the buffer into userspace.
 *
 * This function first blocks, waiting for any existing execution on the
 * buffer to complete, then returns the resulting mapping.
 */
MUST_CHECK void *brw_bo_map(struct brw_context *brw, struct brw_bo *bo, unsigned flags);

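/*
 * A usage sketch; 'brw' is assumed to be the current brw_context, and
 * 'src'/'size' a hypothetical staging copy. brw_bo_map() is MUST_CHECK,
 * so test the result for NULL:
 *
 *    void *map = brw_bo_map(brw, bo, MAP_READ | MAP_WRITE);
 *    if (map != NULL) {
 *       memcpy(map, src, size);   // CPU access while mapped
 *       brw_bo_unmap(bo);         // currently a no-op; see below
 *    }
 */
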
/**
 * Releases the userspace mapping of the buffer object.
 *
 * This is currently a no-op: mappings are cached in the BO (see the
 * map_cpu/map_gtt/map_wc fields above) and reused across map cycles.
 */
static inline int brw_bo_unmap(struct brw_bo *bo) { return 0; }

/** Write data into an object. */
int brw_bo_subdata(struct brw_bo *bo, uint64_t offset,
                   uint64_t size, const void *data);
/**
 * Waits for rendering to the object by the GPU to complete.
 *
 * This is not required for any access to the BO by bo_map,
 * bo_subdata, etc. It is merely a way for the driver to implement
 * glFinish.
 */
void brw_bo_wait_rendering(struct brw_bo *bo);

/**
 * Tears down the buffer manager instance.
 */
void brw_bufmgr_destroy(struct brw_bufmgr *bufmgr);

/**
 * Get the current tiling (and resulting swizzling) mode for the bo.
 *
 * \param buf Buffer to get tiling mode for
 * \param tiling_mode returned tiling mode
 * \param swizzle_mode returned swizzling mode
 */
int brw_bo_get_tiling(struct brw_bo *bo, uint32_t *tiling_mode,
                      uint32_t *swizzle_mode);

/**
 * Create a visible name for a buffer which can be used by other apps
 *
 * \param buf Buffer to create a name for
 * \param name Returned name
 */
int brw_bo_flink(struct brw_bo *bo, uint32_t *name);

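/*
 * A sharing sketch using a flink name; brw_bo_flink() is assumed to
 * return 0 on success. The importing side would pass the name to
 * brw_bo_gem_create_from_name() (declared below), with the name itself
 * exchanged out of band:
 *
 *    uint32_t name;
 *    if (brw_bo_flink(bo, &name) == 0) {
 *       // hand 'name' to the other process, e.g. over a socket
 *    }
 */
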
/**
 * Returns 1 if mapping the buffer for write could cause the process
 * to block, due to the object being active in the GPU.
 */
int brw_bo_busy(struct brw_bo *bo);

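/*
 * A common pattern, sketched: avoid a stall by only writing in place
 * when the GPU is done with the buffer ('upload' is hypothetical):
 *
 *    if (!brw_bo_busy(bo)) {
 *       upload(bo);   // safe: mapping for write won't block
 *    } else {
 *       // allocate a fresh BO instead, or accept the stall
 *    }
 */
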
/**
 * Specify the volatility of the buffer.
 * \param bo Buffer whose purgeable status is to be changed
 * \param madv The purgeable status
 *
 * Use I915_MADV_DONTNEED to mark the buffer as purgeable, and it will be
 * reclaimed under memory pressure. If you subsequently require the buffer,
 * then you must pass I915_MADV_WILLNEED to mark the buffer as required.
 *
 * Returns 1 if the buffer was retained, or 0 if it was discarded whilst
 * marked as I915_MADV_DONTNEED.
 */
int brw_bo_madvise(struct brw_bo *bo, int madv);

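/*
 * The purge/reuse pattern this implies, as a sketch:
 *
 *    brw_bo_madvise(bo, I915_MADV_DONTNEED);   // cached: kernel may purge
 *    ...
 *    if (brw_bo_madvise(bo, I915_MADV_WILLNEED) == 0) {
 *       // contents were discarded under memory pressure; the backing
 *       // pages are gone, so reallocate or repopulate the buffer
 *    }
 */
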
/* brw_bufmgr.c */
struct brw_bufmgr *brw_bufmgr_init(struct gen_device_info *devinfo, int fd);
struct brw_bo *brw_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
                                           const char *name,
                                           unsigned int handle);
void brw_bufmgr_enable_reuse(struct brw_bufmgr *bufmgr);

int brw_bo_wait(struct brw_bo *bo, int64_t timeout_ns);

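/*
 * A waiting sketch; the timeout convention is assumed to follow the
 * kernel's I915_GEM_WAIT ioctl (a negative timeout_ns waits
 * indefinitely; 0 is returned on success, a negative errno otherwise):
 *
 *    if (brw_bo_wait(bo, 1000 * 1000) != 0) {   // wait up to 1 ms
 *       // still busy (or an error); fall back to a blocking path
 *    }
 */
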
uint32_t brw_create_hw_context(struct brw_bufmgr *bufmgr);

#define BRW_CONTEXT_LOW_PRIORITY    ((I915_CONTEXT_MIN_USER_PRIORITY-1)/2)
#define BRW_CONTEXT_MEDIUM_PRIORITY (I915_CONTEXT_DEFAULT_PRIORITY)
#define BRW_CONTEXT_HIGH_PRIORITY   ((I915_CONTEXT_MAX_USER_PRIORITY+1)/2)

int brw_hw_context_set_priority(struct brw_bufmgr *bufmgr,
                                uint32_t ctx_id,
                                int priority);

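/*
 * A sketch of creating a context and lowering its priority; the error
 * conventions are assumed (a zero handle on creation failure, a negative
 * errno from the set-priority call):
 *
 *    uint32_t ctx = brw_create_hw_context(bufmgr);
 *    if (ctx != 0 &&
 *        brw_hw_context_set_priority(bufmgr, ctx,
 *                                    BRW_CONTEXT_LOW_PRIORITY) != 0) {
 *       // the kernel may lack scheduler priority support; keep the
 *       // default priority rather than failing context creation
 *    }
 */
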
void brw_destroy_hw_context(struct brw_bufmgr *bufmgr, uint32_t ctx_id);

int brw_bo_gem_export_to_prime(struct brw_bo *bo, int *prime_fd);
struct brw_bo *brw_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr,
                                            int prime_fd);
struct brw_bo *brw_bo_gem_create_from_prime_tiled(struct brw_bufmgr *bufmgr,
                                                  int prime_fd,
                                                  uint32_t tiling_mode,
                                                  uint32_t stride);

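/*
 * A prime (dma-buf) sharing sketch; in practice the fd would travel to
 * another process over a Unix socket rather than being reimported here:
 *
 *    int fd = -1;
 *    if (brw_bo_gem_export_to_prime(bo, &fd) == 0) {
 *       struct brw_bo *imported =
 *          brw_bo_gem_create_from_prime(bufmgr, fd);
 *       close(fd);   // the import holds its own reference to the pages
 *    }
 */
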
uint32_t brw_bo_export_gem_handle(struct brw_bo *bo);

int brw_reg_read(struct brw_bufmgr *bufmgr, uint32_t offset,
                 uint64_t *result);

#if defined(__cplusplus)
}
#endif
#endif /* INTEL_BUFMGR_H */