v3d: request the kernel to flush caches when TMU is dirty
/*
 * Copyright © 2014-2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef _V3D_DRM_H_
#define _V3D_DRM_H_

#include "drm.h"

#if defined(__cplusplus)
extern "C" {
#endif

#define DRM_V3D_SUBMIT_CL                         0x00
#define DRM_V3D_WAIT_BO                           0x01
#define DRM_V3D_CREATE_BO                         0x02
#define DRM_V3D_MMAP_BO                           0x03
#define DRM_V3D_GET_PARAM                         0x04
#define DRM_V3D_GET_BO_OFFSET                     0x05
#define DRM_V3D_SUBMIT_TFU                        0x06
#define DRM_V3D_SUBMIT_CSD                        0x07

#define DRM_IOCTL_V3D_SUBMIT_CL           DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CL, struct drm_v3d_submit_cl)
#define DRM_IOCTL_V3D_WAIT_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_WAIT_BO, struct drm_v3d_wait_bo)
#define DRM_IOCTL_V3D_CREATE_BO           DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_CREATE_BO, struct drm_v3d_create_bo)
#define DRM_IOCTL_V3D_MMAP_BO             DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_MMAP_BO, struct drm_v3d_mmap_bo)
#define DRM_IOCTL_V3D_GET_PARAM           DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_PARAM, struct drm_v3d_get_param)
#define DRM_IOCTL_V3D_GET_BO_OFFSET       DRM_IOWR(DRM_COMMAND_BASE + DRM_V3D_GET_BO_OFFSET, struct drm_v3d_get_bo_offset)
#define DRM_IOCTL_V3D_SUBMIT_TFU          DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_TFU, struct drm_v3d_submit_tfu)
#define DRM_IOCTL_V3D_SUBMIT_CSD          DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CSD, struct drm_v3d_submit_csd)

#define DRM_V3D_SUBMIT_CL_FLUSH_CACHE             0x01

/**
 * struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D
 * engine.
 *
 * This asks the kernel to have the GPU execute an optional binner
 * command list, and a render command list.
 *
 * The L1T, slice, L2C, L2T, and GCA caches will be flushed before
 * each CL executes. The VCD cache should be flushed (if necessary)
 * by the submitted CLs. The TLB writes are guaranteed to have been
 * flushed by the time the render done IRQ happens, which is the
 * trigger for out_sync. Any dirtying of cachelines by the job (only
 * possible using TMU writes) must be flushed by the caller using the
 * CL's cache flush commands.
 */
struct drm_v3d_submit_cl {
        /* Pointer to the binner command list.
         *
         * This is the first set of commands executed, which runs the
         * coordinate shader to determine where primitives land on the screen,
         * then writes out the state updates and draw calls necessary per tile
         * to the tile allocation BO.
         *
         * This BCL will block on any previous BCL submitted on the
         * same FD, but not on any RCL or BCLs submitted by other
         * clients -- that is left up to the submitter to control
         * using in_sync_bcl if necessary.
         */
        __u32 bcl_start;

        /** End address of the BCL (first byte after the BCL) */
        __u32 bcl_end;

        /* Offset of the render command list.
         *
         * This is the second set of commands executed, which will either
         * execute the tiles that have been set up by the BCL, or a fixed set
         * of tiles (in the case of RCL-only blits).
         *
         * This RCL will block on this submit's BCL, and any previous
         * RCL submitted on the same FD, but not on any RCL or BCLs
         * submitted by other clients -- that is left up to the
         * submitter to control using in_sync_rcl if necessary.
         */
        __u32 rcl_start;

        /** End address of the RCL (first byte after the RCL) */
        __u32 rcl_end;

        /** An optional sync object to wait on before starting the BCL. */
        __u32 in_sync_bcl;
        /** An optional sync object to wait on before starting the RCL. */
        __u32 in_sync_rcl;
        /** An optional sync object to place the completion fence in. */
        __u32 out_sync;

        /* Offset of the tile alloc memory
         *
         * This is optional on V3D 3.3 (where the CL can set the value) but
         * required on V3D 4.1.
         */
        __u32 qma;

        /** Size of the tile alloc memory. */
        __u32 qms;

        /** Offset of the tile state data array. */
        __u32 qts;

        /* Pointer to a u32 array of the BOs that are referenced by the job.
         */
        __u64 bo_handles;

        /* Number of BO handles passed in (size is that times 4). */
        __u32 bo_handle_count;

        __u32 flags;
};

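/*
 * Example: asking the kernel to flush caches for a job that performed TMU
 * writes. This is an illustrative userspace sketch, not part of the UAPI:
 * it assumes libdrm's drmIoctl() from <xf86drm.h>, and the addresses,
 * sizes, syncobj and BO handle array are caller-provided. The flag should
 * only be set when DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH reports support.
 *
 *    struct drm_v3d_submit_cl submit = {
 *            .bcl_start = bcl_start,
 *            .bcl_end = bcl_end,
 *            .rcl_start = rcl_start,
 *            .rcl_end = rcl_end,
 *            .qma = tile_alloc_offset,
 *            .qms = tile_alloc_size,
 *            .qts = tile_state_offset,
 *            .bo_handles = (uintptr_t)bo_handles,
 *            .bo_handle_count = bo_handle_count,
 *            .out_sync = out_syncobj,
 *            .flags = job_has_tmu_writes ? DRM_V3D_SUBMIT_CL_FLUSH_CACHE : 0,
 *    };
 *    int ret = drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_CL, &submit);
 */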
/**
 * struct drm_v3d_wait_bo - ioctl argument for waiting for
 * completion of the last DRM_V3D_SUBMIT_CL on a BO.
 *
 * This is useful for cases where multiple processes might be
 * rendering to a BO and you want to wait for all rendering to be
 * completed.
 */
struct drm_v3d_wait_bo {
        __u32 handle;
        __u32 pad;
        __u64 timeout_ns;
};

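/*
 * Example: waiting up to one second for the last submitted rendering to a
 * BO to finish. Illustrative sketch assuming libdrm's drmIoctl(); a
 * non-zero return means the wait failed or the timeout expired.
 *
 *    struct drm_v3d_wait_bo wait = {
 *            .handle = bo_handle,
 *            .timeout_ns = 1000000000ull,
 *    };
 *    int ret = drmIoctl(fd, DRM_IOCTL_V3D_WAIT_BO, &wait);
 */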
/**
 * struct drm_v3d_create_bo - ioctl argument for creating V3D BOs.
 *
 * There are currently no values for the flags argument, but it may be
 * used in a future extension.
 */
struct drm_v3d_create_bo {
        __u32 size;
        __u32 flags;
        /** Returned GEM handle for the BO. */
        __u32 handle;
        /**
         * Returned offset for the BO in the V3D address space. This offset
         * is private to the DRM fd and is valid for the lifetime of the GEM
         * handle.
         *
         * This offset value will always be nonzero, since various HW
         * units treat 0 specially.
         */
        __u32 offset;
};

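/*
 * Example: allocating a 64 KiB BO. Illustrative sketch assuming libdrm's
 * drmIoctl(); on success, create.handle is the GEM handle and
 * create.offset is the BO's address in the V3D address space.
 *
 *    struct drm_v3d_create_bo create = {
 *            .size = 64 * 1024,
 *    };
 *    int ret = drmIoctl(fd, DRM_IOCTL_V3D_CREATE_BO, &create);
 */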
/**
 * struct drm_v3d_mmap_bo - ioctl argument for mapping V3D BOs.
 *
 * This doesn't actually perform an mmap. Instead, it returns the
 * offset you need to use in an mmap on the DRM device node. This
 * means that tools like valgrind end up knowing about the mapped
 * memory.
 *
 * There are currently no values for the flags argument, but it may be
 * used in a future extension.
 */
struct drm_v3d_mmap_bo {
        /** Handle for the object being mapped. */
        __u32 handle;
        __u32 flags;
        /** offset into the drm node to use for subsequent mmap call. */
        __u64 offset;
};

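/*
 * Example: CPU-mapping a BO by combining this ioctl with mmap() on the
 * DRM fd. Illustrative sketch assuming libdrm's drmIoctl() and
 * <sys/mman.h>; bo_handle and bo_size are caller-provided.
 *
 *    struct drm_v3d_mmap_bo map = {
 *            .handle = bo_handle,
 *    };
 *    drmIoctl(fd, DRM_IOCTL_V3D_MMAP_BO, &map);
 *    void *ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE,
 *                     MAP_SHARED, fd, map.offset);
 */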
enum drm_v3d_param {
        DRM_V3D_PARAM_V3D_UIFCFG,
        DRM_V3D_PARAM_V3D_HUB_IDENT1,
        DRM_V3D_PARAM_V3D_HUB_IDENT2,
        DRM_V3D_PARAM_V3D_HUB_IDENT3,
        DRM_V3D_PARAM_V3D_CORE0_IDENT0,
        DRM_V3D_PARAM_V3D_CORE0_IDENT1,
        DRM_V3D_PARAM_V3D_CORE0_IDENT2,
        DRM_V3D_PARAM_SUPPORTS_TFU,
        DRM_V3D_PARAM_SUPPORTS_CSD,
        DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH,
};

struct drm_v3d_get_param {
        __u32 param;
        __u32 pad;
        __u64 value;
};

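/*
 * Example: probing whether the kernel honors DRM_V3D_SUBMIT_CL_FLUSH_CACHE.
 * Illustrative sketch assuming libdrm's drmIoctl(); kernels that predate a
 * parameter reject it, so an error is treated as "not supported".
 *
 *    struct drm_v3d_get_param get = {
 *            .param = DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH,
 *    };
 *    int supports_cache_flush =
 *            drmIoctl(fd, DRM_IOCTL_V3D_GET_PARAM, &get) == 0 && get.value;
 */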
/**
 * Returns the offset for the BO in the V3D address space for this DRM fd.
 * This is the same value returned by drm_v3d_create_bo, if that was called
 * from this DRM fd.
 */
struct drm_v3d_get_bo_offset {
        __u32 handle;
        __u32 offset;
};

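/*
 * Example: looking up the V3D address of a BO (e.g. one imported from
 * another fd). Illustrative sketch assuming libdrm's drmIoctl().
 *
 *    struct drm_v3d_get_bo_offset get = { .handle = bo_handle };
 *    drmIoctl(fd, DRM_IOCTL_V3D_GET_BO_OFFSET, &get);
 *    // get.offset now holds the BO's address in the V3D address space
 */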
struct drm_v3d_submit_tfu {
        __u32 icfg;
        __u32 iia;
        __u32 iis;
        __u32 ica;
        __u32 iua;
        __u32 ioa;
        __u32 ios;
        __u32 coef[4];
        /* First handle is the output BO, following are other inputs.
         * 0 for unused.
         */
        __u32 bo_handles[4];
        /* sync object to block on before running the TFU job. Each TFU
         * job will execute in the order submitted to its FD. Synchronization
         * against rendering jobs requires using sync objects.
         */
        __u32 in_sync;
        /* Sync object to signal when the TFU job is done. */
        __u32 out_sync;
};

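/*
 * Example: ordering a TFU job after a render job with sync objects, as the
 * comments above describe. Illustrative sketch assuming libdrm's
 * drmSyncobjCreate() and drmIoctl(); the icfg/iia/... register values and
 * BO handles are hardware- and job-specific and left to the caller.
 *
 *    uint32_t render_done, tfu_done;
 *    drmSyncobjCreate(fd, 0, &render_done);
 *    drmSyncobjCreate(fd, 0, &tfu_done);
 *    // ... submit the render job with .out_sync = render_done ...
 *    struct drm_v3d_submit_tfu tfu = {
 *            // icfg, iia, ..., bo_handles filled by the caller
 *            .in_sync = render_done,
 *            .out_sync = tfu_done,
 *    };
 *    drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_TFU, &tfu);
 */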
/* Submits a compute shader for dispatch. This job will block on any
 * previous compute shaders submitted on this fd, and any other
 * synchronization must be performed with in_sync/out_sync.
 */
struct drm_v3d_submit_csd {
        __u32 cfg[7];
        __u32 coef[4];

        /* Pointer to a u32 array of the BOs that are referenced by the job.
         */
        __u64 bo_handles;

        /* Number of BO handles passed in (size is that times 4). */
        __u32 bo_handle_count;

        /* sync object to block on before running the CSD job. Each
         * CSD job will execute in the order submitted to its FD.
         * Synchronization against rendering/TFU jobs or CSD from
         * other fds requires using sync objects.
         */
        __u32 in_sync;
        /* Sync object to signal when the CSD job is done. */
        __u32 out_sync;
};

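/*
 * Example: dispatching a compute job. Illustrative sketch assuming
 * libdrm's drmIoctl(); the cfg[]/coef[] register values are
 * hardware-specific and left to the caller.
 *
 *    struct drm_v3d_submit_csd csd = {
 *            // cfg[0..6] and coef[0..3] filled by the caller
 *            .bo_handles = (uintptr_t)bo_handles,
 *            .bo_handle_count = bo_handle_count,
 *            .in_sync = in_syncobj,
 *            .out_sync = out_syncobj,
 *    };
 *    drmIoctl(fd, DRM_IOCTL_V3D_SUBMIT_CSD, &csd);
 */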
#if defined(__cplusplus)
}
#endif

#endif /* _V3D_DRM_H_ */