ilo: EOL drop unmaintained gallium drv from buildsys
[mesa.git] src/gallium/drivers/ilo/core/intel_winsys.h
/*
 * Mesa 3-D graphics library
 *
 * Copyright (C) 2012-2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Chia-I Wu <olv@lunarg.com>
 */

#ifndef INTEL_WINSYS_H
#define INTEL_WINSYS_H

#include "pipe/p_compiler.h"

/* this is compatible with i915_drm.h's definitions */
enum intel_ring_type {
   INTEL_RING_RENDER = 1,
   INTEL_RING_BSD    = 2,
   INTEL_RING_BLT    = 3,
   INTEL_RING_VEBOX  = 4,
};

/* this is compatible with i915_drm.h's definitions */
enum intel_exec_flag {
   INTEL_EXEC_GEN7_SOL_RESET = 1 << 8,
};

/* this is compatible with i915_drm.h's definitions */
enum intel_reloc_flag {
   INTEL_RELOC_FENCE = 1 << 0,
   INTEL_RELOC_GGTT  = 1 << 1,
   INTEL_RELOC_WRITE = 1 << 2,
};

/* this is compatible with i915_drm.h's definitions */
enum intel_tiling_mode {
   INTEL_TILING_NONE = 0,
   INTEL_TILING_X    = 1,
   INTEL_TILING_Y    = 2,
};

struct winsys_handle;
struct intel_winsys;
struct intel_context;
struct intel_bo;

struct intel_winsys_info {
   int devid;

   /* the sizes of the aperture in bytes */
   size_t aperture_total;
   size_t aperture_mappable;

   bool has_llc;
   bool has_address_swizzling;
   bool has_logical_context;
   bool has_ppgtt;

   /* valid registers for intel_winsys_read_reg() */
   bool has_timestamp;

   /* valid flags for intel_winsys_submit_bo() */
   bool has_gen7_sol_reset;
};

void
intel_winsys_destroy(struct intel_winsys *winsys);

const struct intel_winsys_info *
intel_winsys_get_info(const struct intel_winsys *winsys);

/**
 * Create a logical context for use with the render ring.
 */
struct intel_context *
intel_winsys_create_context(struct intel_winsys *winsys);

/**
 * Destroy a logical context.
 */
void
intel_winsys_destroy_context(struct intel_winsys *winsys,
                             struct intel_context *ctx);

/**
 * Read a register. Only registers that are considered safe, such as
 *
 *   TIMESTAMP (0x2358)
 *
 * can be read.
 */
int
intel_winsys_read_reg(struct intel_winsys *winsys,
                      uint32_t reg, uint64_t *val);
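
/*
 * Example usage (editor's sketch, not part of the original interface; the
 * helper name is illustrative): read the TIMESTAMP register, which is only
 * a valid target of intel_winsys_read_reg() when the winsys reports
 * has_timestamp.
 */
static inline bool
intel_winsys_example_read_timestamp(struct intel_winsys *winsys,
                                    uint64_t *timestamp)
{
   if (!intel_winsys_get_info(winsys)->has_timestamp)
      return false;

   /* TIMESTAMP lives at register offset 0x2358 */
   return (intel_winsys_read_reg(winsys, 0x2358, timestamp) == 0);
}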

/**
 * Return the numbers of submissions lost due to GPU reset.
 *
 * \param active_lost   Number of lost active/guilty submissions
 * \param pending_lost  Number of lost pending/innocent submissions
 */
int
intel_winsys_get_reset_stats(struct intel_winsys *winsys,
                             struct intel_context *ctx,
                             uint32_t *active_lost,
                             uint32_t *pending_lost);

/**
 * Allocate a buffer object.
 *
 * \param name      Informative description of the bo.
 * \param size      Size of the bo.
 * \param cpu_init  Will be initialized by CPU.
 */
struct intel_bo *
intel_winsys_alloc_bo(struct intel_winsys *winsys,
                      const char *name,
                      unsigned long size,
                      bool cpu_init);

/**
 * Create a bo from a user memory pointer. Both \p userptr and \p size must
 * be page aligned.
 */
struct intel_bo *
intel_winsys_import_userptr(struct intel_winsys *winsys,
                            const char *name,
                            void *userptr,
                            unsigned long size,
                            unsigned long flags);

/**
 * Create a bo from a winsys handle.
 */
struct intel_bo *
intel_winsys_import_handle(struct intel_winsys *winsys,
                           const char *name,
                           const struct winsys_handle *handle,
                           unsigned long height,
                           enum intel_tiling_mode *tiling,
                           unsigned long *pitch);

/**
 * Export \p bo as a winsys handle for inter-process sharing. \p tiling and
 * \p pitch must match those set by \p intel_bo_set_tiling().
 */
int
intel_winsys_export_handle(struct intel_winsys *winsys,
                           struct intel_bo *bo,
                           enum intel_tiling_mode tiling,
                           unsigned long pitch,
                           unsigned long height,
                           struct winsys_handle *handle);

/**
 * Return true when buffer objects directly specified in \p bo_array, and
 * those indirectly referenced by them, can fit in the aperture space.
 */
bool
intel_winsys_can_submit_bo(struct intel_winsys *winsys,
                           struct intel_bo **bo_array,
                           int count);

/**
 * Submit \p bo for execution.
 *
 * \p bo and all bos referenced by \p bo will be considered busy until all
 * commands are parsed and executed. \p ctx is ignored when the bo is not
 * submitted to the render ring.
 */
int
intel_winsys_submit_bo(struct intel_winsys *winsys,
                       enum intel_ring_type ring,
                       struct intel_bo *bo, int used,
                       struct intel_context *ctx,
                       unsigned long flags);
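
/*
 * Example usage (editor's sketch, not part of the original interface; the
 * helper name and the sol_reset parameter are illustrative): submit a batch
 * to the render ring, checking the aperture first and requesting GEN7 SOL
 * reset only when the winsys reports support for it.
 */
static inline int
intel_winsys_example_submit_render(struct intel_winsys *winsys,
                                   struct intel_context *ctx,
                                   struct intel_bo *batch, int used,
                                   bool sol_reset)
{
   unsigned long flags = 0;

   if (sol_reset && intel_winsys_get_info(winsys)->has_gen7_sol_reset)
      flags |= INTEL_EXEC_GEN7_SOL_RESET;

   /* make sure the batch and the bos it references fit in the aperture */
   if (!intel_winsys_can_submit_bo(winsys, &batch, 1))
      return -1;

   return intel_winsys_submit_bo(winsys, INTEL_RING_RENDER,
                                 batch, used, ctx, flags);
}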

/**
 * Decode the commands contained in \p bo. For debugging.
 *
 * \param bo    Batch buffer to decode.
 * \param used  Size of the commands in bytes.
 */
void
intel_winsys_decode_bo(struct intel_winsys *winsys,
                       struct intel_bo *bo, int used);

/**
 * Increase the reference count of \p bo. No-op when \p bo is NULL.
 */
struct intel_bo *
intel_bo_ref(struct intel_bo *bo);

/**
 * Decrease the reference count of \p bo. When the reference count reaches
 * zero, \p bo is destroyed. No-op when \p bo is NULL.
 */
void
intel_bo_unref(struct intel_bo *bo);

/**
 * Set the tiling of \p bo. The info is used by GTT mapping and bo export.
 */
int
intel_bo_set_tiling(struct intel_bo *bo,
                    enum intel_tiling_mode tiling,
                    unsigned long pitch);
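
/*
 * Example usage (editor's sketch, not part of the original interface; the
 * helper name is illustrative): make a bo X-tiled and export it, passing
 * the same tiling and pitch to intel_winsys_export_handle() as required by
 * its documentation above.
 */
static inline int
intel_bo_example_export_x_tiled(struct intel_winsys *winsys,
                                struct intel_bo *bo,
                                unsigned long pitch, unsigned long height,
                                struct winsys_handle *handle)
{
   int err = intel_bo_set_tiling(bo, INTEL_TILING_X, pitch);
   if (err)
      return err;

   /* tiling and pitch must match what was just set */
   return intel_winsys_export_handle(winsys, bo, INTEL_TILING_X,
                                     pitch, height, handle);
}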

/**
 * Map \p bo for CPU access. Recursive mapping is allowed.
 *
 * map() maps the backing store into CPU address space, cached. It will block
 * if the bo is busy. This variant allows fastest random reads and writes,
 * but the caller needs to handle tiling or swizzling manually if the bo is
 * tiled or swizzled. If write is enabled and there is no shared last-level
 * cache (LLC), the CPU cache will be flushed, which is expensive.
 *
 * map_gtt() maps the bo for MMIO access, uncached but write-combined. It
 * will block if the bo is busy. This variant promises a reasonable speed for
 * sequential writes, but reads would be very slow. Callers always have a
 * linear view of the bo.
 *
 * map_async() and map_gtt_async() work similarly to map() and map_gtt(),
 * respectively, except that they do not block.
 */
void *
intel_bo_map(struct intel_bo *bo, bool write_enable);

void *
intel_bo_map_async(struct intel_bo *bo);

void *
intel_bo_map_gtt(struct intel_bo *bo);

void *
intel_bo_map_gtt_async(struct intel_bo *bo);

/**
 * Unmap \p bo.
 */
void
intel_bo_unmap(struct intel_bo *bo);
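
/*
 * Example usage (editor's sketch, not part of the original interface; the
 * helper name and the "tiled" parameter are illustrative): pick a CPU
 * mapping for writing. A linear bo on an LLC platform favors map(); a tiled
 * bo favors map_gtt(), which gives a linear view without the caller
 * handling tiling or swizzling manually. The caller unmaps with
 * intel_bo_unmap() when done.
 */
static inline void *
intel_bo_example_map_for_write(struct intel_winsys *winsys,
                               struct intel_bo *bo, bool tiled)
{
   const struct intel_winsys_info *info = intel_winsys_get_info(winsys);

   if (!tiled && info->has_llc)
      return intel_bo_map(bo, true);

   /* uncached but write-combined; fine for sequential writes */
   return intel_bo_map_gtt(bo);
}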

/**
 * Write data to \p bo.
 */
int
intel_bo_pwrite(struct intel_bo *bo, unsigned long offset,
                unsigned long size, const void *data);

/**
 * Read data from \p bo.
 */
int
intel_bo_pread(struct intel_bo *bo, unsigned long offset,
               unsigned long size, void *data);

/**
 * Add \p target_bo to the relocation list.
 *
 * When \p bo is submitted for execution, and if \p target_bo has moved,
 * the kernel will patch \p bo at \p offset to \p target_bo->offset plus
 * \p target_offset.
 *
 * \p presumed_offset should be written to \p bo at \p offset.
 */
int
intel_bo_add_reloc(struct intel_bo *bo, uint32_t offset,
                   struct intel_bo *target_bo, uint32_t target_offset,
                   uint32_t flags, uint64_t *presumed_offset);

/**
 * Return the current number of relocations.
 */
int
intel_bo_get_reloc_count(struct intel_bo *bo);

/**
 * Truncate all relocations except the first \p start ones.
 *
 * Combined with \p intel_bo_get_reloc_count(), this can be used to undo
 * the \p intel_bo_add_reloc() calls that were just made.
 */
void
intel_bo_truncate_relocs(struct intel_bo *bo, int start);
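
/*
 * Example usage (editor's sketch, not part of the original interface; the
 * helper name is illustrative): add a relocation to a batch, store the
 * presumed offset at the patched location, and undo the relocation with
 * intel_bo_truncate_relocs() if the batch would no longer fit in the
 * aperture.
 */
static inline bool
intel_bo_example_add_reloc_checked(struct intel_winsys *winsys,
                                   struct intel_bo *batch, uint32_t offset,
                                   uint32_t *dst, struct intel_bo *target,
                                   uint32_t target_offset)
{
   const int saved = intel_bo_get_reloc_count(batch);
   struct intel_bo *bo_array[1] = { batch };
   uint64_t presumed;

   if (intel_bo_add_reloc(batch, offset, target, target_offset,
                          INTEL_RELOC_WRITE, &presumed))
      return false;

   /* undo the relocation just added if the aperture check fails */
   if (!intel_winsys_can_submit_bo(winsys, bo_array, 1)) {
      intel_bo_truncate_relocs(batch, saved);
      return false;
   }

   /* the presumed offset is written back at the patched location
    * (low 32 bits, for a 32-bit relocation) */
   *dst = (uint32_t) presumed;
   return true;
}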

/**
 * Return true if \p target_bo is on the relocation list of \p bo, or on
 * the relocation list of some bo that is referenced by \p bo.
 */
bool
intel_bo_has_reloc(struct intel_bo *bo, struct intel_bo *target_bo);

/**
 * Wait until \p bo is idle, or \p timeout nanoseconds have passed. A
 * negative timeout means to wait indefinitely.
 *
 * \return 0 only when \p bo is idle
 */
int
intel_bo_wait(struct intel_bo *bo, int64_t timeout);

/**
 * Return true if \p bo is busy.
 */
static inline bool
intel_bo_is_busy(struct intel_bo *bo)
{
   return (intel_bo_wait(bo, 0) != 0);
}

#endif /* INTEL_WINSYS_H */