/*
 * Mesa 3-D graphics library
 *
 * Copyright (C) 2012-2013 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Chia-I Wu <olv@lunarg.com>
 */
#ifndef INTEL_WINSYS_H
#define INTEL_WINSYS_H

#include "pipe/p_compiler.h"
/* this is compatible with i915_drm.h's definitions */
enum intel_exec_flag {
   /* bits[2:0]: ring type */
   INTEL_EXEC_DEFAULT        = 0 << 0,
   INTEL_EXEC_RENDER         = 1 << 0,
   INTEL_EXEC_BSD            = 2 << 0,
   INTEL_EXEC_BLT            = 3 << 0,

   /* bits[7:6]: constant buffer addressing mode */

   /* bits[8]: reset SO write offset register on GEN7+ */
   INTEL_EXEC_GEN7_SOL_RESET = 1 << 8,
};
/* this is compatible with i915_drm.h's definitions */
enum intel_domain_flag {
   INTEL_DOMAIN_CPU         = 0x00000001,
   INTEL_DOMAIN_RENDER      = 0x00000002,
   INTEL_DOMAIN_SAMPLER     = 0x00000004,
   INTEL_DOMAIN_COMMAND     = 0x00000008,
   INTEL_DOMAIN_INSTRUCTION = 0x00000010,
   INTEL_DOMAIN_VERTEX      = 0x00000020,
   INTEL_DOMAIN_GTT         = 0x00000040,
};
/* this is compatible with i915_drm.h's definitions */
enum intel_tiling_mode {
   INTEL_TILING_NONE = 0,
   /* NOTE(review): the X/Y entries were lost in this copy; values restored
    * to match I915_TILING_X (1) / I915_TILING_Y (2) from i915_drm.h, as the
    * comment above requires -- confirm against upstream */
   INTEL_TILING_X    = 1,
   INTEL_TILING_Y    = 2,
};
/* this is compatible with intel_bufmgr.h's definitions */
enum intel_alloc_flag {
   INTEL_ALLOC_FOR_RENDER = 1 << 0,
};
/*
 * Static device capabilities, filled in once at winsys creation and
 * queried via intel_winsys_get_info().
 */
struct intel_winsys_info {
   /* NOTE(review): the original-line numbering jumps here (75 -> 78),
    * so members were likely lost in this copy -- restore from upstream */
   bool has_gen7_sol_reset;      /* presumably: kernel accepts
                                    INTEL_EXEC_GEN7_SOL_RESET -- confirm */
   bool has_address_swizzling;
};
/* NOTE(review): the return types of the declarations below were lost in
 * this copy and have been reconstructed -- confirm against upstream. */

/* Create a winsys for the DRM device opened at file descriptor \p fd. */
struct intel_winsys *
intel_winsys_create_for_fd(int fd);

void
intel_winsys_destroy(struct intel_winsys *winsys);

const struct intel_winsys_info *
intel_winsys_get_info(const struct intel_winsys *winsys);

/* Enable buffer reuse in the underlying buffer manager. */
void
intel_winsys_enable_reuse(struct intel_winsys *winsys);

/* Create a hardware context for intel_bo_exec(). */
struct intel_context *
intel_winsys_create_context(struct intel_winsys *winsys);

void
intel_winsys_destroy_context(struct intel_winsys *winsys,
                             struct intel_context *ctx);

/* Read register \p reg into \p val; presumably returns 0 on success. */
int
intel_winsys_read_reg(struct intel_winsys *winsys,
                      uint32_t reg, uint64_t *val);
/* NOTE(review): the return types and the name/size/flags parameter lines
 * were lost in this copy (numbering gaps 106->109, 112->114, 115->117);
 * reconstructed -- confirm against upstream. */

/* Allocate a linear buffer object of \p size bytes. */
struct intel_bo *
intel_winsys_alloc_buffer(struct intel_winsys *winsys,
                          const char *name, unsigned long size,
                          unsigned long flags);

/* Allocate a 2D texture bo; the chosen row stride is returned in \p pitch. */
struct intel_bo *
intel_winsys_alloc_texture(struct intel_winsys *winsys,
                           const char *name,
                           int width, int height, int cpp,
                           enum intel_tiling_mode tiling,
                           unsigned long flags,
                           unsigned long *pitch);
/*
 * Import \p handle (shared from another process/API) as a bo.  The
 * imported tiling and pitch are returned in \p tiling and \p pitch.
 *
 * NOTE(review): the return type and the \p name parameter line were lost
 * in this copy; reconstructed -- confirm against upstream.
 */
struct intel_bo *
intel_winsys_import_handle(struct intel_winsys *winsys,
                           const char *name,
                           const struct winsys_handle *handle,
                           int width, int height, int cpp,
                           enum intel_tiling_mode *tiling,
                           unsigned long *pitch);
/*
 * Export a handle for inter-process sharing.
 *
 * NOTE(review): the return type and the \p bo / \p pitch parameter lines
 * were lost in this copy; reconstructed -- confirm against upstream.
 */
int
intel_winsys_export_handle(struct intel_winsys *winsys,
                           struct intel_bo *bo,
                           enum intel_tiling_mode tiling,
                           unsigned long pitch,
                           struct winsys_handle *handle);
/*
 * Check that the \p count bos in \p bo_array fit in the aperture
 * simultaneously; presumably returns 0 when they do.
 *
 * NOTE(review): the return type and the trailing "int count);" were lost
 * in this copy; reconstructed -- confirm against upstream.
 */
int
intel_winsys_check_aperture_space(struct intel_winsys *winsys,
                                  struct intel_bo **bo_array,
                                  int count);

/* Decode (disassemble) the first \p used bytes of the batch in \p bo. */
void
intel_winsys_decode_commands(struct intel_winsys *winsys,
                             struct intel_bo *bo, int used);
/* NOTE(review): return types below were lost in this copy and have been
 * reconstructed from the accessor names -- confirm against upstream. */

/* Increase the reference count of \p bo. */
void
intel_bo_reference(struct intel_bo *bo);

/* Decrease the reference count of \p bo, freeing it at zero. */
void
intel_bo_unreference(struct intel_bo *bo);

unsigned long
intel_bo_get_size(const struct intel_bo *bo);

unsigned long
intel_bo_get_offset(const struct intel_bo *bo);

/* Return the CPU pointer established by the last map, if any. */
void *
intel_bo_get_virtual(const struct intel_bo *bo);
/*
 * Map/unmap \p bo for CPU access.
 *
 * map() maps the backing store into CPU address space, cached.  This
 * variant allows for fast random reads and writes.  But the caller needs
 * to handle tiling or swizzling manually if the bo is tiled or swizzled.
 * If write is enabled and there is no shared last-level cache (LLC),
 * unmap() needs to flush the cache, which is rather expensive.
 *
 * map_gtt() maps the bo for MMIO access, uncached but write-combined.
 * This variant promises a reasonable speed for sequential writes, but
 * reads would be very slow.  Callers always have a linear view of the bo.
 *
 * map_unsynchronized() is similar to map_gtt(), except that it does not
 * wait until the bo is idle.
 *
 * NOTE(review): the "void *" return-type lines were lost in this copy;
 * reconstructed -- confirm against upstream.
 */
void *
intel_bo_map(struct intel_bo *bo, bool write_enable);

void *
intel_bo_map_gtt(struct intel_bo *bo);

void *
intel_bo_map_unsynchronized(struct intel_bo *bo);

void
intel_bo_unmap(struct intel_bo *bo);
/*
 * Move data in to or out of the bo.
 *
 * NOTE(review): return types were lost in this copy; reconstructed as
 * "int" (pread/pwrite-style status) -- confirm against upstream.
 */
int
intel_bo_pwrite(struct intel_bo *bo, unsigned long offset,
                unsigned long size, const void *data);

int
intel_bo_pread(struct intel_bo *bo, unsigned long offset,
               unsigned long size, void *data);
/*
 * Add \p target_bo to the relocation list.
 *
 * When \p bo is submitted for execution, and if \p target_bo has moved,
 * the kernel will patch \p bo at \p offset to \p target_bo->offset plus
 * \p target_offset.
 *
 * NOTE(review): return types below were lost in this copy; reconstructed
 * -- confirm against upstream.
 */
int
intel_bo_emit_reloc(struct intel_bo *bo, uint32_t offset,
                    struct intel_bo *target_bo, uint32_t target_offset,
                    uint32_t read_domains, uint32_t write_domain);

/*
 * Return the current number of relocations.
 */
int
intel_bo_get_reloc_count(struct intel_bo *bo);

/*
 * Discard all relocations except the first \p start ones.
 *
 * Combined with \p get_reloc_count(), they can be used to undo
 * the \p emit_reloc() calls that were just made.
 */
void
intel_bo_clear_relocs(struct intel_bo *bo, int start);
/*
 * Return true if \p target_bo is on the relocation list of \p bo, or on
 * the relocation list of some bo that is referenced by \p bo.
 */
bool
intel_bo_references(struct intel_bo *bo, struct intel_bo *target_bo);
/*
 * Submit \p bo for execution.
 *
 * \p bo and all bos referenced by \p bo will be considered busy until all
 * commands are parsed and executed.  \p flags takes intel_exec_flag bits.
 *
 * NOTE(review): return types below were lost in this copy; reconstructed
 * -- confirm against upstream.
 */
int
intel_bo_exec(struct intel_bo *bo, int used,
              struct intel_context *ctx, unsigned long flags);

/*
 * Wait until \p bo is idle, or \p timeout nanoseconds have passed.  A
 * negative timeout means to wait indefinitely.
 *
 * \return 0 only when \p bo is idle
 */
int
intel_bo_wait(struct intel_bo *bo, int64_t timeout);
/*
 * Return true if \p bo is busy.
 *
 * A zero timeout makes intel_bo_wait() poll; a non-zero return there
 * means the bo was not idle, i.e. still busy.  (The "static inline bool"
 * opener and braces were lost in this copy and have been restored.)
 */
static inline bool
intel_bo_is_busy(struct intel_bo *bo)
{
   return (intel_bo_wait(bo, 0) != 0);
}
#endif /* INTEL_WINSYS_H */