/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */
/**
 * Public definitions of Intel-specific bufmgr functions.
 */
#ifndef INTEL_BUFMGR_H
#define INTEL_BUFMGR_H

#include <stdbool.h>
#include <stdint.h>

#include "util/u_atomic.h"
#include "util/list.h"

#if defined(__cplusplus)
extern "C" {
#endif

struct gen_device_info;
struct brw_context;
struct brw_bo {
   /**
    * Size in bytes of the buffer object.
    *
    * The size may be larger than the size originally requested for the
    * allocation, such as being aligned to page size.
    */
   uint64_t size;

   /**
    * Alignment requirement for object
    *
    * Used for GTT mapping & pinning the object.
    */
   uint64_t align;

   /** Buffer manager context associated with this buffer object */
   struct brw_bufmgr *bufmgr;

   /** The GEM handle for this buffer object. */
   uint32_t gem_handle;

   /**
    * Offset of the buffer inside the Graphics Translation Table.
    *
    * This is effectively our GPU address for the buffer and we use it
    * as our base for all state pointers into the buffer. However, since the
    * kernel may be forced to move it around during the course of the
    * buffer's lifetime, we can only know where the buffer was on the last
    * execbuf. We presume, and are usually right, that the buffer will not
    * move and so we use that last offset for the next batch and by doing
    * so we can avoid having the kernel perform a relocation fixup pass as
    * our pointers inside the batch will be using the correct base offset.
    *
    * Since we do use it as a base address for the next batch of pointers,
    * the kernel treats our offset as a request, and if possible will
    * arrange the buffer to be placed at that address (trying to balance
    * the cost of buffer migration versus the cost of performing
    * relocations). Furthermore, we can force the kernel to place the buffer,
    * or report a failure if we specified a conflicting offset, at our chosen
    * offset by specifying EXEC_OBJECT_PINNED.
    *
    * Note the GTT may be either per context, or shared globally across the
    * system. On a shared system, our buffers have to contend for address
    * space with both aperture mappings and framebuffers and so are more
    * likely to be moved. On a full ppGTT system, each batch exists in its
    * own GTT, and so each buffer may have its own offset within each
    * context.
    */
   uint64_t offset64;

   /**
    * The validation list index for this buffer, or -1 when not in a batch.
    * Note that a single buffer may be in multiple batches (contexts), and
    * this is a global field, which refers to the last batch using the BO.
    * It should not be considered authoritative, but can be used to avoid a
    * linear walk of the validation list in the common case by guessing that
    * exec_bos[bo->index] == bo and confirming whether that's the case.
    */
   unsigned index;

   int refcount;

   /**
    * Boolean of whether the GPU is definitely not accessing the buffer.
    *
    * This is only valid when reusable, since non-reusable
    * buffers are those that have been shared with other
    * processes, so we don't know their state.
    */
   bool idle;

#ifndef EXEC_OBJECT_CAPTURE
#define EXEC_OBJECT_CAPTURE (1<<7)
#endif

   /**
    * Kernel-assigned global name for this object
    *
    * List contains both flink named and prime fd'd objects
    */
   unsigned int global_name;

   /**
    * Current tiling mode
    */
   uint32_t tiling_mode;
   uint32_t swizzle_mode;

   /** Mapped address for the buffer, saved across map/unmap cycles */
   void *map_cpu;
   /** GTT virtual address for the buffer, saved across map/unmap cycles */
   void *map_gtt;
   /** WC CPU address for the buffer, saved across map/unmap cycles */
   void *map_wc;

   struct list_head head;

   /**
    * Boolean of whether this buffer can be re-used
    */
   bool reusable;

   /**
    * Boolean of whether this buffer has been shared with an external client.
    */
   bool external;

   /**
    * Boolean of whether this buffer is cache coherent
    */
   bool cache_coherent;
};
#define BO_ALLOC_BUSY   (1<<0)
#define BO_ALLOC_ZEROED (1<<1)
/**
 * Allocate a buffer object.
 *
 * Buffer objects are not necessarily initially mapped into CPU virtual
 * address space or graphics device aperture.  They must be mapped
 * using brw_bo_map() to be used by the CPU.
 */
struct brw_bo *brw_bo_alloc(struct brw_bufmgr *bufmgr, const char *name,
                            uint64_t size, uint64_t alignment);
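/*
 * Example (illustrative sketch, not part of the API): allocating a
 * page-sized scratch buffer and dropping it again.  The 'bufmgr' pointer is
 * assumed to come from brw_bufmgr_init(); the "scratch" name and 4096-byte
 * size are arbitrary, and the allocator is assumed to return NULL on failure.
 *
 *    struct brw_bo *bo = brw_bo_alloc(bufmgr, "scratch", 4096, 0);
 *    if (bo == NULL)
 *       return;                    // allocation failed
 *    ... use the buffer ...
 *    brw_bo_unreference(bo);       // drop the initial reference
 */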
/**
 * Allocate a tiled buffer object.
 *
 * Alignment for tiled objects is set automatically; the 'flags'
 * argument provides a hint about how the object will be used initially.
 *
 * Valid tiling formats are:
 *  I915_TILING_NONE
 *  I915_TILING_X
 *  I915_TILING_Y
 */
struct brw_bo *brw_bo_alloc_tiled(struct brw_bufmgr *bufmgr,
                                  const char *name,
                                  uint64_t size,
                                  uint32_t tiling_mode,
                                  uint32_t pitch,
                                  unsigned flags);
/**
 * Allocate a tiled buffer object.
 *
 * Alignment for tiled objects is set automatically; the 'flags'
 * argument provides a hint about how the object will be used initially.
 *
 * Valid tiling formats are:
 *  I915_TILING_NONE
 *  I915_TILING_X
 *  I915_TILING_Y
 *
 * Note the tiling format may be rejected; callers should check the
 * 'tiling_mode' field on return, as well as the pitch value, which
 * may have been rounded up to accommodate for tiling restrictions.
 */
struct brw_bo *brw_bo_alloc_tiled_2d(struct brw_bufmgr *bufmgr,
                                     const char *name,
                                     int x, int y, int cpp,
                                     uint32_t tiling_mode,
                                     uint32_t *pitch,
                                     unsigned flags);
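/*
 * Example (illustrative sketch): requesting an X-tiled 2D buffer and then
 * honouring whatever tiling and pitch were actually chosen, as the comment
 * above requires.  The dimensions and the "miptree" name are arbitrary;
 * 'bufmgr' is assumed to come from brw_bufmgr_init().
 *
 *    uint32_t pitch = 0;
 *    struct brw_bo *bo =
 *       brw_bo_alloc_tiled_2d(bufmgr, "miptree", 1024, 768, 4,
 *                             I915_TILING_X, &pitch, 0);
 *    if (bo != NULL) {
 *       // bo->tiling_mode and pitch now hold the values actually used,
 *       // which may differ from what was requested.
 *    }
 */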
/** Takes a reference on a buffer object */
static inline void
brw_bo_reference(struct brw_bo *bo)
{
   p_atomic_inc(&bo->refcount);
}
/**
 * Releases a reference on a buffer object, freeing the data if
 * no references remain.
 */
void brw_bo_unreference(struct brw_bo *bo);
/* Must match MapBufferRange interface (for convenience) */
#define MAP_READ       GL_MAP_READ_BIT
#define MAP_WRITE      GL_MAP_WRITE_BIT
#define MAP_ASYNC      GL_MAP_UNSYNCHRONIZED_BIT
#define MAP_PERSISTENT GL_MAP_PERSISTENT_BIT
#define MAP_COHERENT   GL_MAP_COHERENT_BIT

#define MAP_INTERNAL_MASK (0xff << 24)
#define MAP_RAW           (0x01 << 24)
/**
 * Maps the buffer into userspace.
 *
 * This function will block waiting for any existing execution on the
 * buffer to complete, first.  The resulting mapping is returned.
 */
MUST_CHECK void *brw_bo_map(struct brw_context *brw, struct brw_bo *bo,
                            unsigned flags);
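/*
 * Example (illustrative sketch): writing into a buffer through a CPU
 * mapping.  'brw' and 'bo' are assumed to exist already; MAP_WRITE comes
 * from the defines above, and the return value must be checked because the
 * mapping can fail.
 *
 *    void *map = brw_bo_map(brw, bo, MAP_WRITE);
 *    if (map != NULL) {
 *       memset(map, 0, bo->size);   // fill the whole buffer with zeros
 *       brw_bo_unmap(bo);
 *    }
 */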
/**
 * Reduces the refcount on the userspace mapping of the buffer
 * object.
 */
static inline int brw_bo_unmap(struct brw_bo *bo) { return 0; }
/** Write data into an object. */
int brw_bo_subdata(struct brw_bo *bo, uint64_t offset,
                   uint64_t size, const void *data);
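/*
 * Example (illustrative sketch): uploading a small constant block to the
 * start of a buffer without mapping it.  'bo' is assumed to exist, and a
 * non-zero return value is assumed to indicate failure.
 *
 *    float constants[4] = { 0.0f, 1.0f, 2.0f, 3.0f };
 *    if (brw_bo_subdata(bo, 0, sizeof(constants), constants) != 0) {
 *       // upload failed; e.g. fall back to brw_bo_map() and copy by hand
 *    }
 */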
/**
 * Waits for rendering to an object by the GPU to have completed.
 *
 * This is not required for any access to the BO by bo_map,
 * bo_subdata, etc.  It is merely a way for the driver to implement
 * glFinish.
 */
void brw_bo_wait_rendering(struct brw_bo *bo);
/**
 * Tears down the buffer manager instance.
 */
void brw_bufmgr_destroy(struct brw_bufmgr *bufmgr);
/**
 * Get the current tiling (and resulting swizzling) mode for the bo.
 *
 * \param bo Buffer to get tiling mode for
 * \param tiling_mode returned tiling mode
 * \param swizzle_mode returned swizzling mode
 */
int brw_bo_get_tiling(struct brw_bo *bo, uint32_t *tiling_mode,
                      uint32_t *swizzle_mode);
/**
 * Create a visible name for a buffer which can be used by other apps
 *
 * \param bo Buffer to create a name for
 * \param name Returned name
 */
int brw_bo_flink(struct brw_bo *bo, uint32_t *name);
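/*
 * Example (illustrative sketch): publishing a buffer under a global flink
 * name so another process can open it with brw_bo_gem_create_from_name().
 * 'bo' is assumed to exist; a non-zero return is assumed to mean failure.
 *
 *    uint32_t name;
 *    if (brw_bo_flink(bo, &name) == 0) {
 *       // hand 'name' to the other process out of band (e.g. over a socket)
 *    }
 */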
/**
 * Returns 1 if mapping the buffer for write could cause the process
 * to block, due to the object being active in the GPU.
 */
int brw_bo_busy(struct brw_bo *bo);
/**
 * Specify the volatility of the buffer.
 * \param bo Buffer whose purgeable status is to be changed
 * \param madv The purgeable status
 *
 * Use I915_MADV_DONTNEED to mark the buffer as purgeable, and it will be
 * reclaimed under memory pressure. If you subsequently require the buffer,
 * then you must pass I915_MADV_WILLNEED to mark the buffer as required.
 *
 * Returns 1 if the buffer was retained, or 0 if it was discarded whilst
 * marked as I915_MADV_DONTNEED.
 */
int brw_bo_madvise(struct brw_bo *bo, int madv);
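/*
 * Example (illustrative sketch): parking an idle buffer in a cache as
 * purgeable, then checking whether its contents survived before reusing it.
 * 'bo' is assumed to be idle when it is marked DONTNEED.
 *
 *    brw_bo_madvise(bo, I915_MADV_DONTNEED);   // kernel may reclaim pages
 *    ... later, when the buffer is needed again ...
 *    if (brw_bo_madvise(bo, I915_MADV_WILLNEED) == 0) {
 *       // contents were discarded under memory pressure; the backing
 *       // storage must be treated as undefined and re-initialized
 *    }
 */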
/* drm_bacon_bufmgr_gem.c */
struct brw_bufmgr *brw_bufmgr_init(struct gen_device_info *devinfo, int fd);
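/*
 * Example (illustrative sketch): bringing up a buffer manager on an already
 * opened DRM file descriptor and enabling BO reuse.  'devinfo' and 'fd' are
 * assumed to come from the driver's screen initialization, and the function
 * is assumed to return NULL on failure.
 *
 *    struct brw_bufmgr *bufmgr = brw_bufmgr_init(devinfo, fd);
 *    if (bufmgr == NULL)
 *       return false;                    // out of memory or bad fd
 *    brw_bufmgr_enable_reuse(bufmgr);    // allow freed BOs to be recycled
 */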
struct brw_bo *brw_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
                                           const char *name,
                                           unsigned int handle);
void brw_bufmgr_enable_reuse(struct brw_bufmgr *bufmgr);
int brw_bo_wait(struct brw_bo *bo, int64_t timeout_ns);
uint32_t brw_create_hw_context(struct brw_bufmgr *bufmgr);
#define BRW_CONTEXT_LOW_PRIORITY ((I915_CONTEXT_MIN_USER_PRIORITY-1)/2)
#define BRW_CONTEXT_MEDIUM_PRIORITY (I915_CONTEXT_DEFAULT_PRIORITY)
#define BRW_CONTEXT_HIGH_PRIORITY ((I915_CONTEXT_MAX_USER_PRIORITY+1)/2)
int brw_hw_context_set_priority(struct brw_bufmgr *bufmgr,
                                uint32_t ctx_id,
                                int priority);
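/*
 * Example (illustrative sketch): creating a hardware context and raising it
 * to the high scheduling priority defined above.  A context id of 0 is
 * assumed to mean creation failed, and a non-zero return from the priority
 * call is assumed to mean the kernel rejected the request.
 *
 *    uint32_t ctx_id = brw_create_hw_context(bufmgr);
 *    if (ctx_id != 0) {
 *       if (brw_hw_context_set_priority(bufmgr, ctx_id,
 *                                       BRW_CONTEXT_HIGH_PRIORITY) != 0) {
 *          // escalation denied (e.g. insufficient privileges); the context
 *          // still works at the default priority
 *       }
 *    }
 */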
void brw_destroy_hw_context(struct brw_bufmgr *bufmgr, uint32_t ctx_id);
int brw_bo_gem_export_to_prime(struct brw_bo *bo, int *prime_fd);
struct brw_bo *brw_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr,
                                            int prime_fd);
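/*
 * Example (illustrative sketch): sharing a buffer between two processes via
 * a PRIME (dma-buf) file descriptor.  The exporter sends 'fd' to the
 * importer over a UNIX socket (not shown); each side is assumed to have its
 * own bufmgr, and a return of 0 from the export call is assumed to mean
 * success.
 *
 *    // exporter
 *    int fd = -1;
 *    if (brw_bo_gem_export_to_prime(bo, &fd) == 0) {
 *       // pass 'fd' to the other process, then close our copy when done
 *    }
 *
 *    // importer
 *    struct brw_bo *imported = brw_bo_gem_create_from_prime(bufmgr, fd);
 */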
int brw_reg_read(struct brw_bufmgr *bufmgr, uint32_t offset,
                 uint64_t *result);
#if defined(__cplusplus)
}
#endif

#endif /* INTEL_BUFMGR_H */