ilo: fix DRM_API_HANDLE_TYPE_FD export
[mesa.git] / src / gallium / winsys / intel / drm / intel_drm_winsys.c
1 /*
2 * Mesa 3-D graphics library
3 *
4 * Copyright (C) 2012-2014 LunarG, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included
14 * in all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Authors:
25 * Chia-I Wu <olv@lunarg.com>
26 */
27
28 #include <string.h>
29 #include <errno.h>
30 #ifndef ETIME
31 #define ETIME ETIMEDOUT
32 #endif
33
34 #include <xf86drm.h>
35 #include <i915_drm.h>
36 #include <intel_bufmgr.h>
37
38 #include "state_tracker/drm_driver.h"
39 #include "pipe/p_state.h"
40 #include "util/u_inlines.h"
41 #include "util/u_memory.h"
42 #include "util/u_debug.h"
43 #include "../intel_winsys.h"
44
45 #define BATCH_SZ (8192 * sizeof(uint32_t))
46
/* Per-screen winsys state: a libdrm-intel GEM buffer manager wrapped around
 * a DRM file descriptor. */
struct intel_winsys {
   int fd;                          /* DRM fd passed to intel_winsys_create_for_fd();
                                       not closed by intel_winsys_destroy() */
   drm_intel_bufmgr *bufmgr;        /* GEM buffer manager owning all bo allocations */
   struct intel_winsys_info info;   /* device caps queried once by init_info() */

   /* lazily allocated by intel_winsys_decode_commands() for batch dumping */
   struct drm_intel_decode *decode;
};
54
55 static bool
56 get_param(struct intel_winsys *winsys, int param, int *value)
57 {
58 struct drm_i915_getparam gp;
59 int err;
60
61 *value = 0;
62
63 memset(&gp, 0, sizeof(gp));
64 gp.param = param;
65 gp.value = value;
66
67 err = drmCommandWriteRead(winsys->fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
68 if (err) {
69 *value = 0;
70 return false;
71 }
72
73 return true;
74 }
75
76 static bool
77 test_address_swizzling(struct intel_winsys *winsys)
78 {
79 drm_intel_bo *bo;
80 uint32_t tiling = I915_TILING_X, swizzle;
81 unsigned long pitch;
82
83 bo = drm_intel_bo_alloc_tiled(winsys->bufmgr,
84 "address swizzling test", 64, 64, 4, &tiling, &pitch, 0);
85 if (bo) {
86 drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
87 drm_intel_bo_unreference(bo);
88 }
89 else {
90 swizzle = I915_BIT_6_SWIZZLE_NONE;
91 }
92
93 return (swizzle != I915_BIT_6_SWIZZLE_NONE);
94 }
95
96 static bool
97 init_info(struct intel_winsys *winsys)
98 {
99 struct intel_winsys_info *info = &winsys->info;
100 int val;
101
102 /*
103 * When we need the Nth vertex from a user vertex buffer, and the vertex is
104 * uploaded to, say, the beginning of a bo, we want the first vertex in the
105 * bo to be fetched. One way to do this is to set the base address of the
106 * vertex buffer to
107 *
108 * bo->offset64 + (vb->buffer_offset - vb->stride * N).
109 *
110 * The second term may be negative, and we need kernel support to do that.
111 *
112 * This check is taken from the classic driver. u_vbuf_upload_buffers()
113 * guarantees the term is never negative, but it is good to require a
114 * recent kernel.
115 */
116 get_param(winsys, I915_PARAM_HAS_RELAXED_DELTA, &val);
117 if (!val) {
118 debug_error("kernel 2.6.39 required");
119 return false;
120 }
121
122 info->devid = drm_intel_bufmgr_gem_get_devid(winsys->bufmgr);
123
124 get_param(winsys, I915_PARAM_HAS_LLC, &val);
125 info->has_llc = val;
126
127 get_param(winsys, I915_PARAM_HAS_GEN7_SOL_RESET, &val);
128 info->has_gen7_sol_reset = val;
129
130 info->has_address_swizzling = test_address_swizzling(winsys);
131
132 return true;
133 }
134
135 struct intel_winsys *
136 intel_winsys_create_for_fd(int fd)
137 {
138 struct intel_winsys *winsys;
139
140 winsys = CALLOC_STRUCT(intel_winsys);
141 if (!winsys)
142 return NULL;
143
144 winsys->fd = fd;
145
146 winsys->bufmgr = drm_intel_bufmgr_gem_init(winsys->fd, BATCH_SZ);
147 if (!winsys->bufmgr) {
148 debug_error("failed to create GEM buffer manager");
149 FREE(winsys);
150 return NULL;
151 }
152
153 if (!init_info(winsys)) {
154 drm_intel_bufmgr_destroy(winsys->bufmgr);
155 FREE(winsys);
156 return NULL;
157 }
158
159 /*
160 * No need to implicitly set up a fence register for each non-linear reloc
161 * entry. When a fence register is needed for a reloc entry,
162 * drm_intel_bo_emit_reloc_fence() will be called explicitly.
163 *
164 * intel_bo_add_reloc() currently lacks "bool fenced" for this to work.
165 * But we never need a fence register on GEN4+ so we do not need to worry
166 * about it yet.
167 */
168 drm_intel_bufmgr_gem_enable_fenced_relocs(winsys->bufmgr);
169
170 drm_intel_bufmgr_gem_enable_reuse(winsys->bufmgr);
171
172 return winsys;
173 }
174
175 void
176 intel_winsys_destroy(struct intel_winsys *winsys)
177 {
178 if (winsys->decode)
179 drm_intel_decode_context_free(winsys->decode);
180
181 drm_intel_bufmgr_destroy(winsys->bufmgr);
182 FREE(winsys);
183 }
184
185 const struct intel_winsys_info *
186 intel_winsys_get_info(const struct intel_winsys *winsys)
187 {
188 return &winsys->info;
189 }
190
191 struct intel_context *
192 intel_winsys_create_context(struct intel_winsys *winsys)
193 {
194 return (struct intel_context *)
195 drm_intel_gem_context_create(winsys->bufmgr);
196 }
197
198 void
199 intel_winsys_destroy_context(struct intel_winsys *winsys,
200 struct intel_context *ctx)
201 {
202 drm_intel_gem_context_destroy((drm_intel_context *) ctx);
203 }
204
205 int
206 intel_winsys_read_reg(struct intel_winsys *winsys,
207 uint32_t reg, uint64_t *val)
208 {
209 return drm_intel_reg_read(winsys->bufmgr, reg, val);
210 }
211
212 struct intel_bo *
213 intel_winsys_alloc_buffer(struct intel_winsys *winsys,
214 const char *name,
215 unsigned long size,
216 unsigned long flags)
217 {
218 const int alignment = 4096; /* always page-aligned */
219 drm_intel_bo *bo;
220
221 if (flags == INTEL_ALLOC_FOR_RENDER) {
222 bo = drm_intel_bo_alloc_for_render(winsys->bufmgr,
223 name, size, alignment);
224 }
225 else {
226 assert(!flags);
227 bo = drm_intel_bo_alloc(winsys->bufmgr, name, size, alignment);
228 }
229
230 return (struct intel_bo *) bo;
231 }
232
233 struct intel_bo *
234 intel_winsys_alloc_texture(struct intel_winsys *winsys,
235 const char *name,
236 int width, int height, int cpp,
237 enum intel_tiling_mode tiling,
238 unsigned long flags,
239 unsigned long *pitch)
240 {
241 uint32_t real_tiling = tiling;
242 drm_intel_bo *bo;
243
244 bo = drm_intel_bo_alloc_tiled(winsys->bufmgr, name,
245 width, height, cpp, &real_tiling, pitch, flags);
246 if (!bo)
247 return NULL;
248
249 if (real_tiling != tiling) {
250 assert(!"tiling mismatch");
251 drm_intel_bo_unreference(bo);
252 return NULL;
253 }
254
255 return (struct intel_bo *) bo;
256 }
257
258 struct intel_bo *
259 intel_winsys_import_handle(struct intel_winsys *winsys,
260 const char *name,
261 const struct winsys_handle *handle,
262 int width, int height, int cpp,
263 enum intel_tiling_mode *tiling,
264 unsigned long *pitch)
265 {
266 uint32_t real_tiling, swizzle;
267 drm_intel_bo *bo;
268 int err;
269
270 switch (handle->type) {
271 case DRM_API_HANDLE_TYPE_SHARED:
272 {
273 const uint32_t gem_name = handle->handle;
274 bo = drm_intel_bo_gem_create_from_name(winsys->bufmgr,
275 name, gem_name);
276 }
277 break;
278 case DRM_API_HANDLE_TYPE_FD:
279 {
280 const int fd = (int) handle->handle;
281 bo = drm_intel_bo_gem_create_from_prime(winsys->bufmgr,
282 fd, height * handle->stride);
283 }
284 break;
285 default:
286 bo = NULL;
287 break;
288 }
289
290 if (!bo)
291 return NULL;
292
293 err = drm_intel_bo_get_tiling(bo, &real_tiling, &swizzle);
294 if (err) {
295 drm_intel_bo_unreference(bo);
296 return NULL;
297 }
298
299 *tiling = real_tiling;
300 *pitch = handle->stride;
301
302 return (struct intel_bo *) bo;
303 }
304
305 int
306 intel_winsys_export_handle(struct intel_winsys *winsys,
307 struct intel_bo *bo,
308 enum intel_tiling_mode tiling,
309 unsigned long pitch,
310 struct winsys_handle *handle)
311 {
312 int err = 0;
313
314 switch (handle->type) {
315 case DRM_API_HANDLE_TYPE_SHARED:
316 {
317 uint32_t name;
318
319 err = drm_intel_bo_flink((drm_intel_bo *) bo, &name);
320 if (!err)
321 handle->handle = name;
322 }
323 break;
324 case DRM_API_HANDLE_TYPE_KMS:
325 handle->handle = ((drm_intel_bo *) bo)->handle;
326 break;
327 case DRM_API_HANDLE_TYPE_FD:
328 {
329 int fd;
330
331 err = drm_intel_bo_gem_export_to_prime((drm_intel_bo *) bo, &fd);
332 if (!err)
333 handle->handle = fd;
334 }
335 break;
336 default:
337 err = -EINVAL;
338 break;
339 }
340
341 if (err)
342 return err;
343
344 handle->stride = pitch;
345
346 return 0;
347 }
348
349 int
350 intel_winsys_check_aperture_space(struct intel_winsys *winsys,
351 struct intel_bo **bo_array,
352 int count)
353 {
354 return drm_intel_bufmgr_check_aperture_space((drm_intel_bo **) bo_array,
355 count);
356 }
357
358 void
359 intel_winsys_decode_commands(struct intel_winsys *winsys,
360 struct intel_bo *bo, int used)
361 {
362 int err;
363
364 if (!winsys->decode) {
365 winsys->decode = drm_intel_decode_context_alloc(winsys->info.devid);
366 if (!winsys->decode)
367 return;
368
369 /* debug_printf()/debug_error() uses stderr by default */
370 drm_intel_decode_set_output_file(winsys->decode, stderr);
371 }
372
373 err = intel_bo_map(bo, false);
374 if (err) {
375 debug_printf("failed to map buffer for decoding\n");
376 return;
377 }
378
379 /* in dwords */
380 used /= 4;
381
382 drm_intel_decode_set_batch_pointer(winsys->decode,
383 intel_bo_get_virtual(bo), intel_bo_get_offset(bo), used);
384
385 drm_intel_decode(winsys->decode);
386
387 intel_bo_unmap(bo);
388 }
389
390 void
391 intel_bo_reference(struct intel_bo *bo)
392 {
393 drm_intel_bo_reference((drm_intel_bo *) bo);
394 }
395
396 void
397 intel_bo_unreference(struct intel_bo *bo)
398 {
399 drm_intel_bo_unreference((drm_intel_bo *) bo);
400 }
401
402 unsigned long
403 intel_bo_get_size(const struct intel_bo *bo)
404 {
405 return ((drm_intel_bo *) bo)->size;
406 }
407
408 unsigned long
409 intel_bo_get_offset(const struct intel_bo *bo)
410 {
411 return ((drm_intel_bo *) bo)->offset;
412 }
413
414 void *
415 intel_bo_get_virtual(const struct intel_bo *bo)
416 {
417 return ((drm_intel_bo *) bo)->virtual;
418 }
419
420 int
421 intel_bo_map(struct intel_bo *bo, bool write_enable)
422 {
423 return drm_intel_bo_map((drm_intel_bo *) bo, write_enable);
424 }
425
426 int
427 intel_bo_map_gtt(struct intel_bo *bo)
428 {
429 return drm_intel_gem_bo_map_gtt((drm_intel_bo *) bo);
430 }
431
432 int
433 intel_bo_map_unsynchronized(struct intel_bo *bo)
434 {
435 return drm_intel_gem_bo_map_unsynchronized((drm_intel_bo *) bo);
436 }
437
438 void
439 intel_bo_unmap(struct intel_bo *bo)
440 {
441 int err;
442
443 err = drm_intel_bo_unmap((drm_intel_bo *) bo);
444 assert(!err);
445 }
446
447 int
448 intel_bo_pwrite(struct intel_bo *bo, unsigned long offset,
449 unsigned long size, const void *data)
450 {
451 return drm_intel_bo_subdata((drm_intel_bo *) bo, offset, size, data);
452 }
453
454 int
455 intel_bo_pread(struct intel_bo *bo, unsigned long offset,
456 unsigned long size, void *data)
457 {
458 return drm_intel_bo_get_subdata((drm_intel_bo *) bo, offset, size, data);
459 }
460
461 int
462 intel_bo_emit_reloc(struct intel_bo *bo, uint32_t offset,
463 struct intel_bo *target_bo, uint32_t target_offset,
464 uint32_t read_domains, uint32_t write_domain)
465 {
466 return drm_intel_bo_emit_reloc((drm_intel_bo *) bo, offset,
467 (drm_intel_bo *) target_bo, target_offset,
468 read_domains, write_domain);
469 }
470
471 int
472 intel_bo_get_reloc_count(struct intel_bo *bo)
473 {
474 return drm_intel_gem_bo_get_reloc_count((drm_intel_bo *) bo);
475 }
476
477 void
478 intel_bo_clear_relocs(struct intel_bo *bo, int start)
479 {
480 return drm_intel_gem_bo_clear_relocs((drm_intel_bo *) bo, start);
481 }
482
483 bool
484 intel_bo_references(struct intel_bo *bo, struct intel_bo *target_bo)
485 {
486 return drm_intel_bo_references((drm_intel_bo *) bo,
487 (drm_intel_bo *) target_bo);
488 }
489
490 int
491 intel_bo_exec(struct intel_bo *bo, int used,
492 struct intel_context *ctx, unsigned long flags)
493 {
494 if (ctx) {
495 return drm_intel_gem_bo_context_exec((drm_intel_bo *) bo,
496 (drm_intel_context *) ctx, used, flags);
497 }
498 else {
499 return drm_intel_bo_mrb_exec((drm_intel_bo *) bo,
500 used, NULL, 0, 0, flags);
501 }
502 }
503
504 int
505 intel_bo_wait(struct intel_bo *bo, int64_t timeout)
506 {
507 int err;
508
509 err = drm_intel_gem_bo_wait((drm_intel_bo *) bo, timeout);
510 /* consider the bo idle on errors */
511 if (err && err != -ETIME)
512 err = 0;
513
514 return err;
515 }