170909d0ba23f7a7cd5d117dd93d444ef2b949e8
[mesa.git] / src / mesa / drivers / dri / i965 / brw_bufmgr.h
1 /*
2 * Copyright © 2008-2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 /**
29 * @file brw_bufmgr.h
30 *
31 * Public definitions of Intel-specific bufmgr functions.
32 */
33
34 #ifndef INTEL_BUFMGR_H
35 #define INTEL_BUFMGR_H
36
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#include "util/list.h"
41
42 #if defined(__cplusplus)
43 extern "C" {
44 #endif
45
46 struct gen_device_info;
47
struct brw_bo {
   /**
    * Size in bytes of the buffer object.
    *
    * The size may be larger than the size originally requested for the
    * allocation, such as being aligned to page size.
    */
   unsigned long size;

   /**
    * Alignment requirement for object
    *
    * Used for GTT mapping & pinning the object.
    */
   unsigned long align;

   /**
    * Virtual address for accessing the buffer data.  Only valid while
    * mapped.
    *
    * ("virtual" is a reserved word in C++, hence the alternate name.)
    */
#ifdef __cplusplus
   void *virt;
#else
   void *virtual;
#endif

   /** Buffer manager context associated with this buffer object */
   struct brw_bufmgr *bufmgr;

   /** The GEM handle for this buffer object. */
   uint32_t gem_handle;

   /**
    * Last seen card virtual address (offset from the beginning of the
    * aperture) for the object.  This should be used to fill relocation
    * entries when calling brw_bo_emit_reloc()
    */
   uint64_t offset64;

   /**
    * Boolean of whether the GPU is definitely not accessing the buffer.
    *
    * This is only valid when reusable, since non-reusable
    * buffers are those that have been shared with other
    * processes, so we don't know their state.
    */
   bool idle;

   /** Reference count; presumably the BO is freed when it reaches zero —
    * see brw_bo_reference()/brw_bo_unreference().
    */
   int refcount;
   /** Debug name supplied at allocation time (see brw_bo_alloc()). */
   const char *name;

   /**
    * Kernel-assigned global name for this object
    *
    * List contains both flink named and prime fd'd objects
    */
   unsigned int global_name;

   /**
    * Current tiling mode
    */
   uint32_t tiling_mode;
   /** Swizzling mode for the current tiling (see brw_bo_get_tiling()). */
   uint32_t swizzle_mode;
   /** Row pitch in bytes — NOTE(review): appears meaningful only for tiled
    * BOs (set via brw_bo_alloc_tiled()'s pitch out-param); confirm.
    */
   unsigned long stride;

   /** Timestamp used by the BO cache — presumably when the BO was placed on
    * the free list, so stale entries can be expired; TODO confirm.
    */
   time_t free_time;

   /** Mapped address for the buffer, saved across map/unmap cycles */
   void *mem_virtual;
   /** GTT virtual address for the buffer, saved across map/unmap cycles */
   void *gtt_virtual;
   /** WC CPU address for the buffer, saved across map/unmap cycles */
   void *wc_virtual;
   /** Count of outstanding mappings (see brw_bo_map()/brw_bo_unmap()). */
   int map_count;
   /** Link in the buffer manager's VMA cache — TODO confirm list owner. */
   struct list_head vma_list;

   /** BO cache list */
   struct list_head head;

   /**
    * Boolean of whether this buffer can be re-used
    */
   bool reusable;
};
132
/** Flag for the 'flags' argument of brw_bo_alloc_tiled(): hints that the
 * buffer will be used as a render target.
 */
#define BO_ALLOC_FOR_RENDER (1<<0)

/**
 * Allocate a buffer object.
 *
 * Buffer objects are not necessarily initially mapped into CPU virtual
 * address space or graphics device aperture.  They must be mapped
 * using bo_map() or brw_bo_map_gtt() to be used by the CPU.
 *
 * \param bufmgr Buffer manager to allocate from
 * \param name Debug name for the buffer (stored in brw_bo::name)
 * \param size Requested size in bytes (may be rounded up; see brw_bo::size)
 * \param alignment Alignment requirement for the object
 */
struct brw_bo *brw_bo_alloc(struct brw_bufmgr *bufmgr, const char *name,
                            unsigned long size, unsigned int alignment);

/**
 * Allocate a tiled buffer object.
 *
 * Alignment for tiled objects is set automatically; the 'flags'
 * argument provides a hint about how the object will be used initially
 * (e.g. BO_ALLOC_FOR_RENDER).
 *
 * Valid tiling formats are:
 *  I915_TILING_NONE
 *  I915_TILING_X
 *  I915_TILING_Y
 *
 * Note the tiling format may be rejected; callers should check the
 * 'tiling_mode' field on return, as well as the pitch value, which
 * may have been rounded up to accommodate tiling restrictions.
 *
 * \param x Width of the surface in elements
 * \param y Height of the surface in rows
 * \param cpp Bytes per element
 * \param tiling_mode In: requested tiling; out: tiling actually used
 * \param pitch Out: row pitch in bytes, possibly rounded up
 */
struct brw_bo *brw_bo_alloc_tiled(struct brw_bufmgr *bufmgr,
                                  const char *name,
                                  int x, int y, int cpp,
                                  uint32_t *tiling_mode,
                                  unsigned long *pitch,
                                  unsigned long flags);
166
/** Takes a reference on a buffer object */
void brw_bo_reference(struct brw_bo *bo);

/**
 * Releases a reference on a buffer object, freeing the data if
 * no references remain.
 */
void brw_bo_unreference(struct brw_bo *bo);

/**
 * Maps the buffer into userspace.
 *
 * This function will block waiting for any existing execution on the
 * buffer to complete, first.  The resulting mapping is available at
 * buf->virtual.
 *
 * \param write_enable Non-zero if the mapping will be written through —
 *        NOTE(review): presumably lets the kernel pick the right cache
 *        domain; confirm against the implementation.
 * \return 0 on success, or a non-zero error code — TODO confirm convention.
 */
int brw_bo_map(struct brw_bo *bo, int write_enable);

/**
 * Reduces the refcount on the userspace mapping of the buffer
 * object.
 */
int brw_bo_unmap(struct brw_bo *bo);

/** Write data into an object at the given byte offset. */
int brw_bo_subdata(struct brw_bo *bo, unsigned long offset,
                   unsigned long size, const void *data);
/** Read data from an object at the given byte offset. */
int brw_bo_get_subdata(struct brw_bo *bo, unsigned long offset,
                       unsigned long size, void *data);
/**
 * Waits for rendering to an object by the GPU to have completed.
 *
 * This is not required for any access to the BO by bo_map,
 * bo_subdata, etc.  It is merely a way for the driver to implement
 * glFinish.
 */
void brw_bo_wait_rendering(struct brw_bo *bo);
205
/**
 * Tears down the buffer manager instance.
 */
void brw_bufmgr_destroy(struct brw_bufmgr *bufmgr);

/**
 * Get the current tiling (and resulting swizzling) mode for the bo.
 *
 * \param bo Buffer to get tiling mode for
 * \param tiling_mode returned tiling mode
 * \param swizzle_mode returned swizzling mode
 * \return 0 on success — TODO confirm error convention.
 */
int brw_bo_get_tiling(struct brw_bo *bo, uint32_t *tiling_mode,
                      uint32_t *swizzle_mode);

/**
 * Create a visible name for a buffer which can be used by other apps
 * (see brw_bo_gem_create_from_name() for the import side).
 *
 * \param bo Buffer to create a name for
 * \param name Returned name
 */
int brw_bo_flink(struct brw_bo *bo, uint32_t *name);

/**
 * Returns 1 if mapping the buffer for write could cause the process
 * to block, due to the object being active in the GPU.
 */
int brw_bo_busy(struct brw_bo *bo);

/**
 * Specify the volatility of the buffer.
 * \param bo Buffer to set the purgeable status of
 * \param madv The purgeable status
 *
 * Use I915_MADV_DONTNEED to mark the buffer as purgeable, and it will be
 * reclaimed under memory pressure.  If you subsequently require the buffer,
 * then you must pass I915_MADV_WILLNEED to mark the buffer as required.
 *
 * Returns 1 if the buffer was retained, or 0 if it was discarded whilst
 * marked as I915_MADV_DONTNEED.
 */
int brw_bo_madvise(struct brw_bo *bo, int madv);

/**
 * Disable buffer reuse for buffers which will be shared in some way,
 * as with scanout buffers.  When the buffer reference count goes to
 * zero, it will be freed and not placed in the reuse list.
 *
 * \param bo Buffer to disable reuse for
 */
int brw_bo_disable_reuse(struct brw_bo *bo);

/**
 * Query whether a buffer is reusable.
 *
 * \param bo Buffer to query
 */
int brw_bo_is_reusable(struct brw_bo *bo);
264
/* drm_bacon_bufmgr_gem.c */

/**
 * Creates a buffer manager for the given device.
 *
 * \param devinfo Description of the GPU being driven
 * \param fd DRM file descriptor for the device
 * \param batch_size Size hint — presumably the batch buffer size used for
 *        cache sizing; TODO confirm against the implementation.
 */
struct brw_bufmgr *brw_bufmgr_init(struct gen_device_info *devinfo,
                                   int fd, int batch_size);

/**
 * Opens an existing buffer by its global (flink) name — the import side
 * of brw_bo_flink().
 *
 * \param name Debug name to give the imported buffer
 * \param handle Global flink name of the buffer to open
 */
struct brw_bo *brw_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
                                           const char *name,
                                           unsigned int handle);

/** Enables reuse (caching) of freed buffers by this buffer manager. */
void brw_bufmgr_enable_reuse(struct brw_bufmgr *bufmgr);

/** Limits the VMA cache (see brw_bo::vma_list) — NOTE(review): presumably
 * 'limit' is a count of cached mappings; confirm units.
 */
void brw_bufmgr_gem_set_vma_cache_size(struct brw_bufmgr *bufmgr,
                                       int limit);

/** Maps the buffer without synchronizing against GPU access — caller is
 * responsible for ordering; TODO confirm exact semantics.
 */
int brw_bo_map_unsynchronized(struct brw_bo *bo);
/** Maps the buffer through the GTT aperture (see also brw_bo_map()). */
int brw_bo_map_gtt(struct brw_bo *bo);

/* Map helpers returning a CPU / GTT / write-combining pointer directly
 * (cached in brw_bo::mem_virtual / gtt_virtual / wc_virtual).
 */
void *brw_bo_map__cpu(struct brw_bo *bo);
void *brw_bo_map__gtt(struct brw_bo *bo);
void *brw_bo_map__wc(struct brw_bo *bo);

/**
 * Waits for the buffer to become idle, up to timeout_ns nanoseconds —
 * NOTE(review): confirm behavior of negative timeouts against the kernel's
 * I915_GEM_WAIT semantics.
 */
int brw_bo_wait(struct brw_bo *bo, int64_t timeout_ns);

/** Creates a hardware (GPU) context, returning its id — TODO confirm the
 * value returned on failure.
 */
uint32_t brw_create_hw_context(struct brw_bufmgr *bufmgr);
/** Destroys a hardware context created by brw_create_hw_context(). */
void brw_destroy_hw_context(struct brw_bufmgr *bufmgr, uint32_t ctx_id);

/** Exports the BO as a prime (dma-buf) file descriptor in *prime_fd. */
int brw_bo_gem_export_to_prime(struct brw_bo *bo, int *prime_fd);
/** Imports a BO from a prime (dma-buf) fd — the import side of
 * brw_bo_gem_export_to_prime().
 */
struct brw_bo *brw_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr,
                                            int prime_fd, int size);

/** Reads a hardware register at 'offset', storing the value in *result. */
int brw_reg_read(struct brw_bufmgr *bufmgr, uint32_t offset,
                 uint64_t *result);
292
293 /** @{ */
294
295 #if defined(__cplusplus)
296 }
297 #endif
298 #endif /* INTEL_BUFMGR_H */