i965: Move fallback size assignment out of bufmgr
[mesa.git] / src / mesa / drivers / dri / i965 / brw_bufmgr.h
1 /*
2 * Copyright © 2008-2012 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 *
23 * Authors:
24 * Eric Anholt <eric@anholt.net>
25 *
26 */
27
28 /**
29 * @file brw_bufmgr.h
30 *
31 * Public definitions of Intel-specific bufmgr functions.
32 */
33
34 #ifndef INTEL_BUFMGR_H
35 #define INTEL_BUFMGR_H
36
37 #include <stdbool.h>
38 #include <stdint.h>
39 #include <stdio.h>
40 #include "util/list.h"
41
42 #if defined(__cplusplus)
43 extern "C" {
44 #endif
45
46 struct gen_device_info;
47 struct brw_context;
48
struct brw_bo {
   /**
    * Size in bytes of the buffer object.
    *
    * The size may be larger than the size originally requested for the
    * allocation, such as being aligned to page size.
    */
   uint64_t size;

   /**
    * Alignment requirement for object
    *
    * Used for GTT mapping & pinning the object.
    */
   uint64_t align;

   /** Buffer manager context associated with this buffer object */
   struct brw_bufmgr *bufmgr;

   /** The GEM handle for this buffer object. */
   uint32_t gem_handle;

   /**
    * Last seen card virtual address (offset from the beginning of the
    * aperture) for the object.  This should be used to fill relocation
    * entries when calling brw_bo_emit_reloc()
    */
   uint64_t offset64;

   /**
    * Boolean of whether the GPU is definitely not accessing the buffer.
    *
    * This is only valid when reusable, since non-reusable
    * buffers are those that have been shared with other
    * processes, so we don't know their state.
    */
   bool idle;

   /** Reference count; the BO is freed when no references remain. */
   int refcount;
   /** Human-readable name for the buffer, for debugging purposes. */
   const char *name;

#ifndef EXEC_OBJECT_CAPTURE
#define EXEC_OBJECT_CAPTURE (1<<7)
#endif
   /** Extra kernel execbuffer flags (e.g. EXEC_OBJECT_CAPTURE) for this BO. */
   uint64_t kflags;

   /**
    * Kernel-assigned global name for this object
    *
    * List contains both flink named and prime fd'd objects
    */
   unsigned int global_name;

   /**
    * Current tiling mode
    */
   uint32_t tiling_mode;
   /** Bit-6 swizzling mode resulting from the current tiling. */
   uint32_t swizzle_mode;
   /** Surface pitch in bytes (see the 'pitch' out-param of brw_bo_alloc_tiled). */
   uint32_t stride;

   /* NOTE(review): presumably the time the BO entered the reuse cache, used
    * for cache expiry — confirm against the bufmgr implementation. */
   time_t free_time;

   /** Mapped address for the buffer, saved across map/unmap cycles */
   void *map_cpu;
   /** GTT virtual address for the buffer, saved across map/unmap cycles */
   void *map_gtt;
   /** WC CPU address for the buffer, saved across map/unmap cycles */
   void *map_wc;
   /** Number of outstanding mappings (see brw_bo_map()/brw_bo_unmap()). */
   int map_count;

   /** BO cache list */
   struct list_head head;

   /**
    * Boolean of whether this buffer can be re-used
    */
   bool reusable;

   /**
    * Boolean of whether this buffer is cache coherent
    */
   bool cache_coherent;
};
132
/** Hint flag for brw_bo_alloc_tiled()'s 'flags' argument. */
#define BO_ALLOC_FOR_RENDER (1<<0)

/**
 * Allocate a buffer object.
 *
 * Buffer objects are not necessarily initially mapped into CPU virtual
 * address space or graphics device aperture. They must be mapped
 * using brw_bo_map() to be used by the CPU.
 */
struct brw_bo *brw_bo_alloc(struct brw_bufmgr *bufmgr, const char *name,
                            uint64_t size, uint64_t alignment);

/**
 * Allocate a tiled buffer object.
 *
 * Alignment for tiled objects is set automatically; the 'flags'
 * argument provides a hint about how the object will be used initially
 * (e.g. BO_ALLOC_FOR_RENDER).
 *
 * Valid tiling formats are:
 *  I915_TILING_NONE
 *  I915_TILING_X
 *  I915_TILING_Y
 *
 * Note the tiling format may be rejected; callers should check the
 * 'tiling_mode' field on return, as well as the pitch value, which
 * may have been rounded up to accommodate tiling restrictions.
 */
struct brw_bo *brw_bo_alloc_tiled(struct brw_bufmgr *bufmgr,
                                  const char *name,
                                  int x, int y, int cpp,
                                  uint32_t tiling_mode,
                                  uint32_t *pitch,
                                  unsigned flags);
166
/** Takes a reference on a buffer object */
void brw_bo_reference(struct brw_bo *bo);

/**
 * Releases a reference on a buffer object, freeing the data if
 * no references remain.
 */
void brw_bo_unreference(struct brw_bo *bo);

/* Must match MapBufferRange interface (for convenience) */
#define MAP_READ GL_MAP_READ_BIT
#define MAP_WRITE GL_MAP_WRITE_BIT
#define MAP_ASYNC GL_MAP_UNSYNCHRONIZED_BIT
#define MAP_PERSISTENT GL_MAP_PERSISTENT_BIT
#define MAP_COHERENT GL_MAP_COHERENT_BIT
/* internal */
#define MAP_INTERNAL_MASK (0xff << 24)
#define MAP_RAW (0x01 << 24)

/**
 * Maps the buffer into userspace.
 *
 * This function will block waiting for any existing execution on the
 * buffer to complete, first. The resulting mapping is returned.
 *
 * 'flags' takes the MAP_* bits defined above.
 */
MUST_CHECK void *brw_bo_map(struct brw_context *brw, struct brw_bo *bo, unsigned flags);

/**
 * Reduces the refcount on the userspace mapping of the buffer
 * object.
 */
int brw_bo_unmap(struct brw_bo *bo);

/** Write data into an object. */
int brw_bo_subdata(struct brw_bo *bo, uint64_t offset,
                   uint64_t size, const void *data);
/** Read data from an object. */
int brw_bo_get_subdata(struct brw_bo *bo, uint64_t offset,
                       uint64_t size, void *data);
/**
 * Waits for rendering to an object by the GPU to have completed.
 *
 * This is not required for any access to the BO by bo_map,
 * bo_subdata, etc.  It is merely a way for the driver to implement
 * glFinish.
 */
void brw_bo_wait_rendering(struct brw_context *brw, struct brw_bo *bo);

/**
 * Tears down the buffer manager instance.
 */
void brw_bufmgr_destroy(struct brw_bufmgr *bufmgr);

/**
 * Get the current tiling (and resulting swizzling) mode for the bo.
 *
 * \param bo Buffer to get tiling mode for
 * \param tiling_mode returned tiling mode
 * \param swizzle_mode returned swizzling mode
 */
int brw_bo_get_tiling(struct brw_bo *bo, uint32_t *tiling_mode,
                      uint32_t *swizzle_mode);

/**
 * Create a visible name for a buffer which can be used by other apps
 *
 * \param bo Buffer to create a name for
 * \param name Returned name
 */
int brw_bo_flink(struct brw_bo *bo, uint32_t *name);

/**
 * Returns 1 if mapping the buffer for write could cause the process
 * to block, due to the object being active in the GPU.
 */
int brw_bo_busy(struct brw_bo *bo);

/**
 * Specify the volatility of the buffer.
 * \param bo Buffer whose purgeable status to set
 * \param madv The purgeable status
 *
 * Use I915_MADV_DONTNEED to mark the buffer as purgeable, and it will be
 * reclaimed under memory pressure. If you subsequently require the buffer,
 * then you must pass I915_MADV_WILLNEED to mark the buffer as required.
 *
 * Returns 1 if the buffer was retained, or 0 if it was discarded whilst
 * marked as I915_MADV_DONTNEED.
 */
int brw_bo_madvise(struct brw_bo *bo, int madv);
257
/* brw_bufmgr.c (formerly drm_bacon_bufmgr_gem.c) */

/**
 * Creates a buffer manager for the device open on the given DRM fd.
 *
 * NOTE(review): how 'batch_size' is used is not visible from this header —
 * confirm against the implementation.
 */
struct brw_bufmgr *brw_bufmgr_init(struct gen_device_info *devinfo,
                                   int fd, int batch_size);

/**
 * Opens an existing buffer object by its global (flink) name.
 *
 * NOTE(review): the parameter is named 'handle' but, by analogy with
 * brw_bo_flink(), presumably takes the flink name — confirm.
 */
struct brw_bo *brw_bo_gem_create_from_name(struct brw_bufmgr *bufmgr,
                                           const char *name,
                                           unsigned int handle);

/** Enables re-use of freed buffer objects (see brw_bo::reusable). */
void brw_bufmgr_enable_reuse(struct brw_bufmgr *bufmgr);

/** Waits up to 'timeout_ns' nanoseconds for the buffer to become idle. */
int brw_bo_wait(struct brw_bo *bo, int64_t timeout_ns);

/** Creates a new hardware context and returns its id. */
uint32_t brw_create_hw_context(struct brw_bufmgr *bufmgr);

/** Destroys a context previously created with brw_create_hw_context(). */
void brw_destroy_hw_context(struct brw_bufmgr *bufmgr, uint32_t ctx_id);

/** Exports the buffer as a PRIME (dma-buf) fd, returned in '*prime_fd'. */
int brw_bo_gem_export_to_prime(struct brw_bo *bo, int *prime_fd);

/** Imports a buffer object from a PRIME (dma-buf) file descriptor. */
struct brw_bo *brw_bo_gem_create_from_prime(struct brw_bufmgr *bufmgr,
                                            int prime_fd);

/** Reads the GPU register at 'offset' into '*result'. */
int brw_reg_read(struct brw_bufmgr *bufmgr, uint32_t offset,
                 uint64_t *result);
277
278 /** @{ */
279
280 #if defined(__cplusplus)
281 }
282 #endif
283 #endif /* INTEL_BUFMGR_H */