/*
 * Copyright © 2008-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 */

/**
 * Public definitions of Intel-specific bufmgr functions.
 */

#ifndef INTEL_BUFMGR_H
#define INTEL_BUFMGR_H

#include <stdbool.h>
#include <stdint.h>

#include "c11/threads.h"
#include "util/u_atomic.h"
#include "util/list.h"
#include "util/macros.h" /* MUST_CHECK, UNUSED */

#if defined(__cplusplus)
extern "C" {
#endif

struct gen_device_info;
struct brw_context;

/**
 * Memory zones.  When allocating a buffer, you can request that it is
 * placed into a specific region of the virtual address space (PPGTT).
 *
 * Most buffers can go anywhere (BRW_MEMZONE_OTHER).  Some buffers are
 * accessed via an offset from a base address.  STATE_BASE_ADDRESS has
 * a maximum 4GB size for each region, so we need to restrict those
 * buffers to be within 4GB of the base.  Each memory zone corresponds
 * to a particular base address.
 *
 * Currently, i965 partitions the address space into two regions:
 *
 * - Low 4GB
 * - Full 48-bit address space
 *
 * Eventually, we hope to carve out 4GB of VMA for each base address.
 */
enum brw_memory_zone {
   BRW_MEMZONE_LOW_4G,
   BRW_MEMZONE_OTHER,

   /* Shaders - Instruction State Base Address */
   BRW_MEMZONE_SHADER = BRW_MEMZONE_LOW_4G,

   /* Scratch - General State Base Address */
   BRW_MEMZONE_SCRATCH = BRW_MEMZONE_LOW_4G,

   /* Surface State Base Address */
   BRW_MEMZONE_SURFACE = BRW_MEMZONE_LOW_4G,

   /* Dynamic State Base Address */
   BRW_MEMZONE_DYNAMIC = BRW_MEMZONE_LOW_4G,
};

#define BRW_MEMZONE_COUNT (BRW_MEMZONE_OTHER + 1)

struct brw_bo {
   /**
    * Size in bytes of the buffer object.
    *
    * The size may be larger than the size originally requested for the
    * allocation, such as being aligned to page size.
    */
   uint64_t size;

   /** Buffer manager context associated with this buffer object */
   struct brw_bufmgr *bufmgr;

   /** The GEM handle for this buffer object. */
   uint32_t gem_handle;

   /**
    * Offset of the buffer inside the Graphics Translation Table.
    *
    * This is effectively our GPU address for the buffer, and we use it
    * as our base for all state pointers into the buffer. However, since
    * the kernel may be forced to move it around during the course of the
    * buffer's lifetime, we can only know where the buffer was on the last
    * execbuf. We presume, and are usually right, that the buffer will not
    * move, and so we use that last offset for the next batch; by doing so
    * we can avoid having the kernel perform a relocation fixup pass, as
    * our pointers inside the batch will be using the correct base offset.
    *
    * Since we do use it as a base address for the next batch of pointers,
    * the kernel treats our offset as a request, and if possible will
    * arrange for the buffer to be placed at that address (trying to
    * balance the cost of buffer migration versus the cost of performing
    * relocations). Furthermore, we can force the kernel to place the
    * buffer at our chosen offset, or to report a failure if we specified
    * a conflicting offset, by specifying EXEC_OBJECT_PINNED.
    *
    * Note the GTT may be either per context, or shared globally across
    * the system. On a shared system, our buffers have to contend for
    * address space with both aperture mappings and framebuffers, and so
    * are more likely to be moved. On a full ppGTT system, each batch
    * exists in its own GTT, and so each buffer may have its own offset
    * within each context.
    */
   uint64_t gtt_offset;

   /**
    * The validation list index for this buffer, or -1 when not in a batch.
    * Note that a single buffer may be in multiple batches (contexts), and
    * this is a global field, which refers to the last batch using the BO.
    * It should not be considered authoritative, but can be used to avoid a
    * linear walk of the validation list in the common case by guessing that
    * exec_bos[bo->index] == bo and confirming whether that's the case.
    */
   unsigned index;

   /**
    * Boolean of whether the GPU is definitely not accessing the buffer.
    *
    * This is only valid when reusable, since non-reusable
    * buffers are those that have been shared with other
    * processes, so we don't know their state.
    */
   bool idle;

   /** Reference count; use brw_bo_reference()/brw_bo_unreference(). */
   int refcount;

   /**
    * Kernel-assigned global name for this object.
    *
    * The list contains both flink-named and prime-fd'd objects.
    */
   unsigned int global_name;

   /**
    * Current tiling mode
    */
   uint32_t tiling_mode;
   uint32_t swizzle_mode;

   /** Mapped address for the buffer, saved across map/unmap cycles */
   void *map_cpu;
   /** GTT virtual address for the buffer, saved across map/unmap cycles */
   void *map_gtt;
   /** WC CPU address for the buffer, saved across map/unmap cycles */
   void *map_wc;

   /** BO cache list */
   struct list_head head;

   /**
    * List of GEM handle exports of this buffer (bo_export).
    *
    * Hold bufmgr->lock when using this list.
    */
   struct list_head exports;

   /**
    * Boolean of whether this buffer can be re-used
    */
   bool reusable;

   /**
    * Boolean of whether this buffer has been shared with an external client.
    */
   bool external;

   /**
    * Boolean of whether this buffer is cache coherent
    */
   bool cache_coherent;
};

#define BO_ALLOC_BUSY   (1<<0)
#define BO_ALLOC_ZEROED (1<<1)

/**
 * Allocate a buffer object.
 *
 * Buffer objects are not necessarily initially mapped into CPU virtual
 * address space or graphics device aperture.  They must be mapped
 * using brw_bo_map() to be used by the CPU.
 */
struct brw_bo *brw_bo_alloc(struct brw_bufmgr *bufmgr, const char *name,
                            uint64_t size, enum brw_memory_zone memzone);

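/*
 * Example: a minimal allocate/release sketch (illustrative only; assumes
 * "bufmgr" was obtained via brw_bufmgr_get_for_fd() and that a page-sized
 * buffer with no placement restriction is wanted):
 *
 *    struct brw_bo *bo =
 *       brw_bo_alloc(bufmgr, "example", 4096, BRW_MEMZONE_OTHER);
 *    if (bo != NULL) {
 *       // ... use the buffer ...
 *       brw_bo_unreference(bo);
 *    }
 */
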
/**
 * Allocate a tiled buffer object.
 *
 * Alignment for tiled objects is set automatically; the 'flags'
 * argument provides a hint about how the object will be used initially.
 *
 * Valid tiling formats are:
 *  I915_TILING_NONE
 *  I915_TILING_X
 *  I915_TILING_Y
 */
struct brw_bo *brw_bo_alloc_tiled(struct brw_bufmgr *bufmgr,
                                  const char *name,
                                  uint64_t size,
                                  enum brw_memory_zone memzone,
                                  uint32_t tiling_mode,
                                  uint32_t pitch,
                                  unsigned flags);

/**
 * Allocate a tiled buffer object.
 *
 * Alignment for tiled objects is set automatically; the 'flags'
 * argument provides a hint about how the object will be used initially.
 *
 * Valid tiling formats are:
 *  I915_TILING_NONE
 *  I915_TILING_X
 *  I915_TILING_Y
 *
 * Note the tiling format may be rejected; callers should check the
 * 'tiling_mode' field on return, as well as the pitch value, which
 * may have been rounded up to accommodate tiling restrictions.
 */
struct brw_bo *brw_bo_alloc_tiled_2d(struct brw_bufmgr *bufmgr,
                                     const char *name,
                                     int x, int y, int cpp,
                                     enum brw_memory_zone memzone,
                                     uint32_t tiling_mode,
                                     uint32_t *pitch,
                                     unsigned flags);

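/*
 * Example: allocating a hypothetical 512x512, 4 bytes-per-pixel Y-tiled
 * surface, then verifying that the requested tiling was honored:
 *
 *    uint32_t pitch = 0;
 *    struct brw_bo *bo =
 *       brw_bo_alloc_tiled_2d(bufmgr, "miptree", 512, 512, 4,
 *                             BRW_MEMZONE_OTHER, I915_TILING_Y,
 *                             &pitch, 0);
 *    if (bo != NULL && bo->tiling_mode != I915_TILING_Y) {
 *       // tiling was rejected; fall back to a linear layout
 *    }
 */
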
/** Takes a reference on a buffer object */
static inline void
brw_bo_reference(struct brw_bo *bo)
{
   p_atomic_inc(&bo->refcount);
}

/**
 * Releases a reference on a buffer object, freeing the data if
 * no references remain.
 */
void brw_bo_unreference(struct brw_bo *bo);

/* Must match MapBufferRange interface (for convenience) */
#define MAP_READ        GL_MAP_READ_BIT
#define MAP_WRITE       GL_MAP_WRITE_BIT
#define MAP_ASYNC       GL_MAP_UNSYNCHRONIZED_BIT
#define MAP_PERSISTENT  GL_MAP_PERSISTENT_BIT
#define MAP_COHERENT    GL_MAP_COHERENT_BIT
/* internal */
#define MAP_INTERNAL_MASK (0xffu << 24)
#define MAP_RAW           (0x01 << 24)

/**
 * Maps the buffer into userspace.
 *
 * This function will first block waiting for any existing execution on
 * the buffer to complete.  The resulting mapping is returned.
 */
MUST_CHECK void *brw_bo_map(struct brw_context *brw, struct brw_bo *bo,
                            unsigned flags);

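/*
 * Example: a write-only map/unmap sketch ("brw", "data", and "size" are
 * hypothetical caller state).  Pass MAP_ASYNC to skip the implicit wait
 * for outstanding GPU work:
 *
 *    void *ptr = brw_bo_map(brw, bo, MAP_WRITE);
 *    if (ptr != NULL) {
 *       memcpy(ptr, data, size);
 *       brw_bo_unmap(bo);
 *    }
 */
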
/**
 * Reduces the refcount on the userspace mapping of the buffer
 * object.
 */
static inline int brw_bo_unmap(UNUSED struct brw_bo *bo) { return 0; }

/** Write data into an object. */
int brw_bo_subdata(struct brw_bo *bo, uint64_t offset,
                   uint64_t size, const void *data);

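/*
 * Example: uploading a small block without an explicit map ("uniforms"
 * is a hypothetical caller structure); a non-zero return indicates the
 * write failed:
 *
 *    if (brw_bo_subdata(bo, 0, sizeof(uniforms), &uniforms) != 0) {
 *       // handle the upload failure
 *    }
 */
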
/**
 * Waits for rendering to an object by the GPU to have completed.
 *
 * This is not required for any access to the BO by bo_map,
 * bo_subdata, etc.  It is merely a way for the driver to implement
 * glFinish.
 */
void brw_bo_wait_rendering(struct brw_bo *bo);

/**
 * Unref a buffer manager instance.
 */
void brw_bufmgr_unref(struct brw_bufmgr *bufmgr);

/**
 * Get the current tiling (and resulting swizzling) mode for the bo.
 *
 * \param bo Buffer to get tiling mode for
 * \param tiling_mode returned tiling mode
 * \param swizzle_mode returned swizzling mode
 */
int brw_bo_get_tiling(struct brw_bo *bo, uint32_t *tiling_mode,
                      uint32_t *swizzle_mode);

/**
 * Create a visible name for a buffer which can be used by other apps
 *
 * \param bo Buffer to create a name for
 * \param name Returned name
 */
int brw_bo_flink(struct brw_bo *bo, uint32_t *name);

/**
 * Returns 1 if mapping the buffer for write could cause the process
 * to block, due to the object being active in the GPU.
 */
int brw_bo_busy(struct brw_bo *bo);

/**
 * Specify the volatility of the buffer.
 * \param bo Buffer whose purgeable status is to be set
 * \param madv The purgeable status
 *
 * Use I915_MADV_DONTNEED to mark the buffer as purgeable, and it will be
 * reclaimed under memory pressure. If you subsequently require the buffer,
 * then you must pass I915_MADV_WILLNEED to mark the buffer as required.
 *
 * Returns 1 if the buffer was retained, or 0 if it was discarded whilst
 * marked as I915_MADV_DONTNEED.
 */
int brw_bo_madvise(struct brw_bo *bo, int madv);

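/*
 * Example: the purgeable round trip described above.  A cached-but-idle
 * buffer is marked purgeable, and must be revalidated before reuse:
 *
 *    brw_bo_madvise(bo, I915_MADV_DONTNEED);
 *    // ... later, before touching the buffer again ...
 *    if (brw_bo_madvise(bo, I915_MADV_WILLNEED) == 0) {
 *       // contents were discarded; the buffer must be repopulated
 *    }
 */
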
/* drm_bacon_bufmgr_gem.c */
struct brw_bufmgr *brw_bufmgr_get_for_fd(struct gen_device_info *devinfo,
                                         int fd, bool bo_reuse);

struct brw_bo *brw_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
                                           const char *name,
                                           unsigned int handle);

int brw_bo_wait(struct brw_bo *bo, int64_t timeout_ns);

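/*
 * Example: polling with a bounded wait.  This sketch assumes the usual
 * i915 GEM_WAIT convention, where a negative timeout_ns waits
 * indefinitely and a non-zero return means the wait did not complete:
 *
 *    if (brw_bo_busy(bo)) {
 *       // wait up to 1 ms for outstanding GPU work to finish
 *       if (brw_bo_wait(bo, 1000000) != 0) {
 *          // still busy (or error); defer the CPU access
 *       }
 *    }
 */
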
uint32_t brw_create_hw_context(struct brw_bufmgr *bufmgr);

int brw_hw_context_set_priority(struct brw_bufmgr *bufmgr,
                                uint32_t ctx_id,
                                int priority);

void brw_destroy_hw_context(struct brw_bufmgr *bufmgr, uint32_t ctx_id);

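/*
 * Example: hardware context lifecycle sketch.  The priority value is
 * assumed to follow the kernel's I915_CONTEXT_PARAM_PRIORITY convention
 * (0 = default); the exact range is a kernel detail:
 *
 *    uint32_t ctx_id = brw_create_hw_context(bufmgr);
 *    if (ctx_id != 0) {
 *       brw_hw_context_set_priority(bufmgr, ctx_id, 0);
 *       // ... submit batches against ctx_id ...
 *       brw_destroy_hw_context(bufmgr, ctx_id);
 *    }
 */
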
int brw_bufmgr_get_fd(struct brw_bufmgr *bufmgr);

int brw_bo_gem_export_to_prime(struct brw_bo *bo, int *prime_fd);
struct brw_bo *brw_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr,
                                            int prime_fd);
struct brw_bo *brw_bo_gem_create_from_prime_tiled(struct brw_bufmgr *bufmgr,
                                                  int prime_fd,
                                                  uint32_t tiling_mode,
                                                  uint32_t stride);

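/*
 * Example: dma-buf (PRIME) sharing sketch between two buffer managers
 * ("other_bufmgr" is hypothetical); error handling and close()ing the
 * fd once both sides hold references are elided:
 *
 *    int prime_fd = -1;
 *    if (brw_bo_gem_export_to_prime(bo, &prime_fd) == 0) {
 *       struct brw_bo *imported =
 *          brw_bo_gem_create_from_prime(other_bufmgr, prime_fd);
 *       // ... use "imported", then brw_bo_unreference() it ...
 *    }
 */
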
uint32_t brw_bo_export_gem_handle(struct brw_bo *bo);

/**
 * Exports a bo as a GEM handle into a given DRM file descriptor
 * \param bo Buffer to export
 * \param drm_fd File descriptor where the new handle is created
 * \param out_handle Pointer to store the new handle
 *
 * Returns 0 if the buffer was successfully exported, a non-zero error
 * code otherwise.
 */
int brw_bo_export_gem_handle_for_device(struct brw_bo *bo, int drm_fd,
                                        uint32_t *out_handle);

int brw_reg_read(struct brw_bufmgr *bufmgr, uint32_t offset,
                 uint64_t *result);

bool brw_using_softpin(struct brw_bufmgr *bufmgr);

#if defined(__cplusplus)
}
#endif
#endif /* INTEL_BUFMGR_H */