ilo: PIPE_CAP_QUERY_TIMESTAMP may not be supported
[mesa.git] / src / gallium / winsys / intel / drm / intel_drm_winsys.c
/*
 * Mesa 3-D graphics library
 *
 * Copyright (C) 2012-2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Chia-I Wu <olv@lunarg.com>
 */

#include <string.h>
#include <errno.h>
#ifndef ETIME
#define ETIME ETIMEDOUT
#endif

#include <xf86drm.h>
#include <i915_drm.h>
#include <intel_bufmgr.h>

#include "state_tracker/drm_driver.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_debug.h"
#include "../intel_winsys.h"

#define BATCH_SZ (8192 * sizeof(uint32_t))

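/*
 * Per-fd winsys state: the DRM fd, the libdrm GEM buffer manager built on
 * top of it, the capabilities reported to the driver, and a lazily created
 * batch decoder that is used only for debugging.
 */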
struct intel_winsys {
   int fd;
   drm_intel_bufmgr *bufmgr;
   struct intel_winsys_info info;

   struct drm_intel_decode *decode;
};

static drm_intel_bo *
gem_bo(const struct intel_bo *bo)
{
   return (drm_intel_bo *) bo;
}

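/*
 * Query an i915 kernel parameter with the I915_GETPARAM ioctl.  On failure,
 * *value is forced to 0 so that callers can treat "not supported" and
 * "query failed" the same way.
 */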
static bool
get_param(struct intel_winsys *winsys, int param, int *value)
{
   struct drm_i915_getparam gp;
   int err;

   *value = 0;

   memset(&gp, 0, sizeof(gp));
   gp.param = param;
   gp.value = value;

   err = drmCommandWriteRead(winsys->fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
   if (err) {
      *value = 0;
      return false;
   }

   return true;
}

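/*
 * Detect bit-6 address swizzling by allocating a small X-tiled bo and asking
 * the kernel which swizzle mode it assigned.  If the allocation fails,
 * assume no swizzling.
 */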
static bool
test_address_swizzling(struct intel_winsys *winsys)
{
   drm_intel_bo *bo;
   uint32_t tiling = I915_TILING_X, swizzle;
   unsigned long pitch;

   bo = drm_intel_bo_alloc_tiled(winsys->bufmgr,
         "address swizzling test", 64, 64, 4, &tiling, &pitch, 0);
   if (bo) {
      drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
      drm_intel_bo_unreference(bo);
   }
   else {
      swizzle = I915_BIT_6_SWIZZLE_NONE;
   }

   return (swizzle != I915_BIT_6_SWIZZLE_NONE);
}

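/*
 * Return true when the kernel allows reading the given register through the
 * I915_REG_READ ioctl.  This is used below to probe the render ring
 * TIMESTAMP register (0x2358), so the driver can advertise
 * PIPE_CAP_QUERY_TIMESTAMP only when the read actually works.
 */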
static bool
test_reg_read(struct intel_winsys *winsys, uint32_t reg)
{
   uint64_t dummy;

   return !drm_intel_reg_read(winsys->bufmgr, reg, &dummy);
}

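/* Probe kernel and hardware capabilities once at winsys creation. */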
static bool
init_info(struct intel_winsys *winsys)
{
   struct intel_winsys_info *info = &winsys->info;
   int val;

   /*
    * When we need the Nth vertex from a user vertex buffer, and the vertex is
    * uploaded to, say, the beginning of a bo, we want the first vertex in the
    * bo to be fetched.  One way to do this is to set the base address of the
    * vertex buffer to
    *
    *   bo->offset64 + (vb->buffer_offset - vb->stride * N).
    *
    * The second term may be negative, and we need kernel support to do that.
    *
    * This check is taken from the classic driver.  u_vbuf_upload_buffers()
    * guarantees the term is never negative, but it is good to require a
    * recent kernel.
    */
   get_param(winsys, I915_PARAM_HAS_RELAXED_DELTA, &val);
   if (!val) {
      debug_error("kernel 2.6.39 required");
      return false;
   }

   info->devid = drm_intel_bufmgr_gem_get_devid(winsys->bufmgr);

   get_param(winsys, I915_PARAM_HAS_LLC, &val);
   info->has_llc = val;

   /* test TIMESTAMP read */
   info->has_timestamp = test_reg_read(winsys, 0x2358);

   get_param(winsys, I915_PARAM_HAS_GEN7_SOL_RESET, &val);
   info->has_gen7_sol_reset = val;

   info->has_address_swizzling = test_address_swizzling(winsys);

   return true;
}

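/*
 * Create a winsys for an already opened DRM fd.  The fd is stored, not
 * duplicated, so it must remain valid for the lifetime of the winsys.
 *
 * A minimal usage sketch (hypothetical caller; the device path, open flags,
 * and error handling are assumptions, not part of this file):
 *
 *    int fd = open("/dev/dri/renderD128", O_RDWR);
 *    struct intel_winsys *ws = intel_winsys_create_for_fd(fd);
 *    if (ws) {
 *       const struct intel_winsys_info *info = intel_winsys_get_info(ws);
 *       // ... create contexts, allocate bos, submit batches ...
 *       intel_winsys_destroy(ws);
 *    }
 */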
struct intel_winsys *
intel_winsys_create_for_fd(int fd)
{
   struct intel_winsys *winsys;

   winsys = CALLOC_STRUCT(intel_winsys);
   if (!winsys)
      return NULL;

   winsys->fd = fd;

   winsys->bufmgr = drm_intel_bufmgr_gem_init(winsys->fd, BATCH_SZ);
   if (!winsys->bufmgr) {
      debug_error("failed to create GEM buffer manager");
      FREE(winsys);
      return NULL;
   }

   if (!init_info(winsys)) {
      drm_intel_bufmgr_destroy(winsys->bufmgr);
      FREE(winsys);
      return NULL;
   }

   /*
    * No need to implicitly set up a fence register for each non-linear reloc
    * entry.  When a fence register is needed for a reloc entry,
    * drm_intel_bo_emit_reloc_fence() will be called explicitly.
    *
    * intel_bo_add_reloc() currently lacks a "bool fenced" parameter for this
    * to work.  But we never need a fence register on GEN4+, so we do not
    * need to worry about it yet.
    */
   drm_intel_bufmgr_gem_enable_fenced_relocs(winsys->bufmgr);

   drm_intel_bufmgr_gem_enable_reuse(winsys->bufmgr);

   return winsys;
}

void
intel_winsys_destroy(struct intel_winsys *winsys)
{
   if (winsys->decode)
      drm_intel_decode_context_free(winsys->decode);

   drm_intel_bufmgr_destroy(winsys->bufmgr);
   FREE(winsys);
}

const struct intel_winsys_info *
intel_winsys_get_info(const struct intel_winsys *winsys)
{
   return &winsys->info;
}

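/*
 * Hardware (logical) contexts give each driver context its own GPU state.
 * They are created through libdrm and, as noted in intel_winsys_submit_bo()
 * below, are only usable on the render ring.
 */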
struct intel_context *
intel_winsys_create_context(struct intel_winsys *winsys)
{
   return (struct intel_context *)
      drm_intel_gem_context_create(winsys->bufmgr);
}

void
intel_winsys_destroy_context(struct intel_winsys *winsys,
                             struct intel_context *ctx)
{
   drm_intel_gem_context_destroy((drm_intel_context *) ctx);
}

int
intel_winsys_read_reg(struct intel_winsys *winsys,
                      uint32_t reg, uint64_t *val)
{
   return drm_intel_reg_read(winsys->bufmgr, reg, val);
}

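/*
 * Allocate an untyped (linear) bo.  Buffers destined for GPU rendering or
 * instruction fetch go through the for-render allocator; other buffers use
 * the regular one.  All allocations are page-aligned.
 */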
struct intel_bo *
intel_winsys_alloc_buffer(struct intel_winsys *winsys,
                          const char *name,
                          unsigned long size,
                          uint32_t initial_domain)
{
   const bool for_render =
      (initial_domain & (INTEL_DOMAIN_RENDER | INTEL_DOMAIN_INSTRUCTION));
   const int alignment = 4096; /* always page-aligned */
   drm_intel_bo *bo;

   if (for_render) {
      bo = drm_intel_bo_alloc_for_render(winsys->bufmgr,
            name, size, alignment);
   }
   else {
      bo = drm_intel_bo_alloc(winsys->bufmgr, name, size, alignment);
   }

   return (struct intel_bo *) bo;
}

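/*
 * Allocate a 2D bo with the requested tiling.  The buffer manager may come
 * back with a different tiling than requested; this winsys treats that as a
 * failure rather than silently returning a differently tiled bo.
 */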
struct intel_bo *
intel_winsys_alloc_texture(struct intel_winsys *winsys,
                           const char *name,
                           int width, int height, int cpp,
                           enum intel_tiling_mode tiling,
                           uint32_t initial_domain,
                           unsigned long *pitch)
{
   const unsigned long flags =
      (initial_domain & (INTEL_DOMAIN_RENDER | INTEL_DOMAIN_INSTRUCTION)) ?
      BO_ALLOC_FOR_RENDER : 0;
   uint32_t real_tiling = tiling;
   drm_intel_bo *bo;

   bo = drm_intel_bo_alloc_tiled(winsys->bufmgr, name,
         width, height, cpp, &real_tiling, pitch, flags);
   if (!bo)
      return NULL;

   if (real_tiling != tiling) {
      assert(!"tiling mismatch");
      drm_intel_bo_unreference(bo);
      return NULL;
   }

   return (struct intel_bo *) bo;
}

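/*
 * Wrap a buffer shared by another process or driver: either a global GEM
 * flink name (DRM_API_HANDLE_TYPE_SHARED) or a dma-buf/PRIME fd
 * (DRM_API_HANDLE_TYPE_FD).  The tiling is queried from the kernel and the
 * pitch is taken from the handle.
 */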
struct intel_bo *
intel_winsys_import_handle(struct intel_winsys *winsys,
                           const char *name,
                           const struct winsys_handle *handle,
                           int width, int height, int cpp,
                           enum intel_tiling_mode *tiling,
                           unsigned long *pitch)
{
   uint32_t real_tiling, swizzle;
   drm_intel_bo *bo;
   int err;

   switch (handle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      {
         const uint32_t gem_name = handle->handle;
         bo = drm_intel_bo_gem_create_from_name(winsys->bufmgr,
               name, gem_name);
      }
      break;
   case DRM_API_HANDLE_TYPE_FD:
      {
         const int fd = (int) handle->handle;
         bo = drm_intel_bo_gem_create_from_prime(winsys->bufmgr,
               fd, height * handle->stride);
      }
      break;
   default:
      bo = NULL;
      break;
   }

   if (!bo)
      return NULL;

   err = drm_intel_bo_get_tiling(bo, &real_tiling, &swizzle);
   if (err) {
      drm_intel_bo_unreference(bo);
      return NULL;
   }

   *tiling = real_tiling;
   *pitch = handle->stride;

   return (struct intel_bo *) bo;
}

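/*
 * Export a bo for sharing: a flink name, the raw GEM handle (for KMS), or a
 * dma-buf/PRIME fd, depending on handle->type.
 */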
int
intel_winsys_export_handle(struct intel_winsys *winsys,
                           struct intel_bo *bo,
                           enum intel_tiling_mode tiling,
                           unsigned long pitch,
                           struct winsys_handle *handle)
{
   int err = 0;

   switch (handle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      {
         uint32_t name;

         err = drm_intel_bo_flink(gem_bo(bo), &name);
         if (!err)
            handle->handle = name;
      }
      break;
   case DRM_API_HANDLE_TYPE_KMS:
      handle->handle = gem_bo(bo)->handle;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      {
         int fd;

         err = drm_intel_bo_gem_export_to_prime(gem_bo(bo), &fd);
         if (!err)
            handle->handle = fd;
      }
      break;
   default:
      err = -EINVAL;
      break;
   }

   if (err)
      return err;

   handle->stride = pitch;

   return 0;
}

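/*
 * Check that the set of bos referenced by a batch fits in the GPU aperture;
 * the aperture-space check returns 0 when they fit.
 */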
bool
intel_winsys_can_submit_bo(struct intel_winsys *winsys,
                           struct intel_bo **bo_array,
                           int count)
{
   return !drm_intel_bufmgr_check_aperture_space((drm_intel_bo **) bo_array,
                                                 count);
}

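/*
 * Submit a batch bo for execution.  The low bits of flags select the ring
 * (e.g. INTEL_EXEC_RENDER); the hardware context is dropped for non-render
 * rings because logical contexts are only available on the render ring.
 */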
int
intel_winsys_submit_bo(struct intel_winsys *winsys,
                       struct intel_bo *bo, int used,
                       struct intel_context *ctx,
                       unsigned long flags)
{
   /* logical contexts are only available for the render ring */
   if ((flags & 0x7) > INTEL_EXEC_RENDER)
      ctx = NULL;

   if (ctx) {
      return drm_intel_gem_bo_context_exec(gem_bo(bo),
            (drm_intel_context *) ctx, used, flags);
   }
   else {
      return drm_intel_bo_mrb_exec(gem_bo(bo),
            used, NULL, 0, 0, flags);
   }
}

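/*
 * Disassemble a submitted batch with libdrm's decoder, for debugging.  The
 * decode context is created on first use and writes to stderr.
 */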
void
intel_winsys_decode_bo(struct intel_winsys *winsys,
                       struct intel_bo *bo, int used)
{
   void *ptr;

   if (!winsys->decode) {
      winsys->decode = drm_intel_decode_context_alloc(winsys->info.devid);
      if (!winsys->decode)
         return;

      /* debug_printf()/debug_error() use stderr by default */
      drm_intel_decode_set_output_file(winsys->decode, stderr);
   }

   ptr = intel_bo_map(bo, false);
   if (!ptr) {
      debug_printf("failed to map buffer for decoding\n");
      return;
   }

   /* in dwords */
   used /= 4;

   drm_intel_decode_set_batch_pointer(winsys->decode,
         ptr, gem_bo(bo)->offset64, used);

   drm_intel_decode(winsys->decode);

   intel_bo_unmap(bo);
}

void
intel_bo_reference(struct intel_bo *bo)
{
   drm_intel_bo_reference(gem_bo(bo));
}

void
intel_bo_unreference(struct intel_bo *bo)
{
   drm_intel_bo_unreference(gem_bo(bo));
}

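/*
 * Three mapping flavors are exposed:
 *
 *  - intel_bo_map():                CPU mapping; synchronizes with the GPU
 *                                   before returning.
 *  - intel_bo_map_gtt():            write-combined mapping through the GTT,
 *                                   which respects tiling.
 *  - intel_bo_map_unsynchronized(): GTT mapping without waiting for the GPU;
 *                                   the caller is responsible for avoiding
 *                                   races.
 *
 * A typical upload through the synchronized path might look like this
 * (sketch only; "data" and "size" are assumed to come from the caller):
 *
 *    void *ptr = intel_bo_map(bo, true);
 *    if (ptr) {
 *       memcpy(ptr, data, size);
 *       intel_bo_unmap(bo);
 *    }
 */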
void *
intel_bo_map(struct intel_bo *bo, bool write_enable)
{
   int err;

   err = drm_intel_bo_map(gem_bo(bo), write_enable);
   if (err) {
      debug_error("failed to map bo");
      return NULL;
   }

   return gem_bo(bo)->virtual;
}

void *
intel_bo_map_gtt(struct intel_bo *bo)
{
   int err;

   err = drm_intel_gem_bo_map_gtt(gem_bo(bo));
   if (err) {
      debug_error("failed to map bo");
      return NULL;
   }

   return gem_bo(bo)->virtual;
}

void *
intel_bo_map_unsynchronized(struct intel_bo *bo)
{
   int err;

   err = drm_intel_gem_bo_map_unsynchronized(gem_bo(bo));
   if (err) {
      debug_error("failed to map bo");
      return NULL;
   }

   return gem_bo(bo)->virtual;
}

void
intel_bo_unmap(struct intel_bo *bo)
{
   int err;

   err = drm_intel_bo_unmap(gem_bo(bo));
   assert(!err);
}

int
intel_bo_pwrite(struct intel_bo *bo, unsigned long offset,
                unsigned long size, const void *data)
{
   return drm_intel_bo_subdata(gem_bo(bo), offset, size, data);
}

int
intel_bo_pread(struct intel_bo *bo, unsigned long offset,
               unsigned long size, void *data)
{
   return drm_intel_bo_get_subdata(gem_bo(bo), offset, size, data);
}

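/*
 * Record a relocation in the batch bo and return the presumed address of the
 * target, i.e. where the target currently sits plus the offset within it.
 * The winsys writes this presumed address into the batch; the kernel patches
 * it at execbuffer time if the target bo has moved.
 */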
int
intel_bo_add_reloc(struct intel_bo *bo, uint32_t offset,
                   struct intel_bo *target_bo, uint32_t target_offset,
                   uint32_t read_domains, uint32_t write_domain,
                   uint64_t *presumed_offset)
{
   int err;

   err = drm_intel_bo_emit_reloc(gem_bo(bo), offset,
         gem_bo(target_bo), target_offset,
         read_domains, write_domain);

   *presumed_offset = gem_bo(target_bo)->offset64 + target_offset;

   return err;
}

int
intel_bo_get_reloc_count(struct intel_bo *bo)
{
   return drm_intel_gem_bo_get_reloc_count(gem_bo(bo));
}

void
intel_bo_truncate_relocs(struct intel_bo *bo, int start)
{
   drm_intel_gem_bo_clear_relocs(gem_bo(bo), start);
}

bool
intel_bo_has_reloc(struct intel_bo *bo, struct intel_bo *target_bo)
{
   return drm_intel_bo_references(gem_bo(bo), gem_bo(target_bo));
}

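/*
 * Wait for the bo to become idle.  The timeout is in nanoseconds (a negative
 * value waits indefinitely); -ETIME means the bo is still busy, and any other
 * error is mapped to 0 so callers simply see the bo as idle.
 */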
int
intel_bo_wait(struct intel_bo *bo, int64_t timeout)
{
   int err;

   err = drm_intel_gem_bo_wait(gem_bo(bo), timeout);
   /* consider the bo idle on errors */
   if (err && err != -ETIME)
      err = 0;

   return err;
}