ilo: move away from drm_intel_bo_alloc_tiled
src/gallium/winsys/intel/drm/intel_drm_winsys.c (mesa.git)
/*
 * Mesa 3-D graphics library
 *
 * Copyright (C) 2012-2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Chia-I Wu <olv@lunarg.com>
 */

#include <string.h>
#include <errno.h>
#ifndef ETIME
#define ETIME ETIMEDOUT
#endif

#include <xf86drm.h>
#include <i915_drm.h>
#include <intel_bufmgr.h>

#include "os/os_thread.h"
#include "state_tracker/drm_driver.h"
#include "pipe/p_state.h"
#include "util/u_inlines.h"
#include "util/u_memory.h"
#include "util/u_debug.h"
#include "../intel_winsys.h"

#define BATCH_SZ (8192 * sizeof(uint32_t))

struct intel_winsys {
   int fd;
   drm_intel_bufmgr *bufmgr;
   struct intel_winsys_info info;

   /* these are protected by the mutex */
   pipe_mutex mutex;
   drm_intel_context *first_gem_ctx;
   struct drm_intel_decode *decode;
};

static drm_intel_bo *
gem_bo(const struct intel_bo *bo)
{
   return (drm_intel_bo *) bo;
}

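/*
 * Query an i915 device parameter with DRM_I915_GETPARAM.  On failure,
 * *value is set to zero and false is returned.
 */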
static bool
get_param(struct intel_winsys *winsys, int param, int *value)
{
   struct drm_i915_getparam gp;
   int err;

   *value = 0;

   memset(&gp, 0, sizeof(gp));
   gp.param = param;
   gp.value = value;

   err = drmCommandWriteRead(winsys->fd, DRM_I915_GETPARAM, &gp, sizeof(gp));
   if (err) {
      *value = 0;
      return false;
   }

   return true;
}

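/*
 * Detect bit-6 address swizzling: allocate a small X-tiled bo and ask the
 * kernel which swizzle mode it got.  If the allocation fails, assume no
 * swizzling.
 */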
static bool
test_address_swizzling(struct intel_winsys *winsys)
{
   drm_intel_bo *bo;
   uint32_t tiling = I915_TILING_X, swizzle;
   unsigned long pitch;

   bo = drm_intel_bo_alloc_tiled(winsys->bufmgr,
         "address swizzling test", 64, 64, 4, &tiling, &pitch, 0);
   if (bo) {
      drm_intel_bo_get_tiling(bo, &tiling, &swizzle);
      drm_intel_bo_unreference(bo);
   }
   else {
      swizzle = I915_BIT_6_SWIZZLE_NONE;
   }

   return (swizzle != I915_BIT_6_SWIZZLE_NONE);
}

static bool
test_reg_read(struct intel_winsys *winsys, uint32_t reg)
{
   uint64_t dummy;

   return !drm_intel_reg_read(winsys->bufmgr, reg, &dummy);
}

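/*
 * Probe the kernel for the features the driver depends on and fill in
 * winsys->info.  This also creates the first logical context so that
 * has_logical_context is reported accurately.
 */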
static bool
probe_winsys(struct intel_winsys *winsys)
{
   struct intel_winsys_info *info = &winsys->info;
   int val;

   /*
    * When we need the Nth vertex from a user vertex buffer, and the vertex
    * is uploaded to, say, the beginning of a bo, we want the first vertex
    * in the bo to be fetched.  One way to do this is to set the base
    * address of the vertex buffer to
    *
    *   bo->offset64 + (vb->buffer_offset - vb->stride * N).
    *
    * The second term may be negative, and we need kernel support to do
    * that.
    *
    * This check is taken from the classic driver.  u_vbuf_upload_buffers()
    * guarantees the term is never negative, but it is good to require a
    * recent kernel.
    */
   get_param(winsys, I915_PARAM_HAS_RELAXED_DELTA, &val);
   if (!val) {
      debug_error("kernel 2.6.39 required");
      return false;
   }

   info->devid = drm_intel_bufmgr_gem_get_devid(winsys->bufmgr);

   info->max_batch_size = BATCH_SZ;

   get_param(winsys, I915_PARAM_HAS_LLC, &val);
   info->has_llc = val;
   info->has_address_swizzling = test_address_swizzling(winsys);

   winsys->first_gem_ctx = drm_intel_gem_context_create(winsys->bufmgr);
   info->has_logical_context = (winsys->first_gem_ctx != NULL);

   get_param(winsys, I915_PARAM_HAS_ALIASING_PPGTT, &val);
   info->has_ppgtt = val;

   /* test TIMESTAMP read */
   info->has_timestamp = test_reg_read(winsys, 0x2358);

   get_param(winsys, I915_PARAM_HAS_GEN7_SOL_RESET, &val);
   info->has_gen7_sol_reset = val;

   return true;
}

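/*
 * Create a winsys for an already opened DRM fd.  The fd is stored but the
 * winsys does not take ownership of it; intel_winsys_destroy() will not
 * close it.
 */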
struct intel_winsys *
intel_winsys_create_for_fd(int fd)
{
   struct intel_winsys *winsys;

   winsys = CALLOC_STRUCT(intel_winsys);
   if (!winsys)
      return NULL;

   winsys->fd = fd;

   winsys->bufmgr = drm_intel_bufmgr_gem_init(winsys->fd, BATCH_SZ);
   if (!winsys->bufmgr) {
      debug_error("failed to create GEM buffer manager");
      FREE(winsys);
      return NULL;
   }

   pipe_mutex_init(winsys->mutex);

   if (!probe_winsys(winsys)) {
      pipe_mutex_destroy(winsys->mutex);
      drm_intel_bufmgr_destroy(winsys->bufmgr);
      FREE(winsys);
      return NULL;
   }

   /*
    * No need to implicitly set up a fence register for each non-linear
    * reloc entry.  When a fence register is needed for a reloc entry,
    * drm_intel_bo_emit_reloc_fence() will be called explicitly.
    *
    * intel_bo_add_reloc() currently lacks "bool fenced" for this to work.
    * But we never need a fence register on GEN4+ so we do not need to
    * worry about it yet.
    */
   drm_intel_bufmgr_gem_enable_fenced_relocs(winsys->bufmgr);

   drm_intel_bufmgr_gem_enable_reuse(winsys->bufmgr);

   return winsys;
}

void
intel_winsys_destroy(struct intel_winsys *winsys)
{
   if (winsys->decode)
      drm_intel_decode_context_free(winsys->decode);

   if (winsys->first_gem_ctx)
      drm_intel_gem_context_destroy(winsys->first_gem_ctx);

   pipe_mutex_destroy(winsys->mutex);
   drm_intel_bufmgr_destroy(winsys->bufmgr);
   FREE(winsys);
}

const struct intel_winsys_info *
intel_winsys_get_info(const struct intel_winsys *winsys)
{
   return &winsys->info;
}

struct intel_context *
intel_winsys_create_context(struct intel_winsys *winsys)
{
   drm_intel_context *gem_ctx;

   /* try the preallocated context first */
   pipe_mutex_lock(winsys->mutex);
   gem_ctx = winsys->first_gem_ctx;
   winsys->first_gem_ctx = NULL;
   pipe_mutex_unlock(winsys->mutex);

   if (!gem_ctx)
      gem_ctx = drm_intel_gem_context_create(winsys->bufmgr);

   return (struct intel_context *) gem_ctx;
}

void
intel_winsys_destroy_context(struct intel_winsys *winsys,
                             struct intel_context *ctx)
{
   drm_intel_gem_context_destroy((drm_intel_context *) ctx);
}

int
intel_winsys_read_reg(struct intel_winsys *winsys,
                      uint32_t reg, uint64_t *val)
{
   return drm_intel_reg_read(winsys->bufmgr, reg, val);
}

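/*
 * Allocate a bo without going through drm_intel_bo_alloc_tiled(): the
 * caller passes in the pitch and height it has already computed, a linear
 * allocation of pitch * height bytes is made, and the tiling mode is then
 * set on it.  The pitch must already be tile-aligned: X tiles are 512
 * bytes wide and Y tiles are 128 bytes wide, hence the checks below.
 */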
struct intel_bo *
intel_winsys_alloc_bo(struct intel_winsys *winsys,
                      const char *name,
                      enum intel_tiling_mode tiling,
                      unsigned long pitch,
                      unsigned long height,
                      uint32_t initial_domain)
{
   const bool for_render =
      (initial_domain & (INTEL_DOMAIN_RENDER | INTEL_DOMAIN_INSTRUCTION));
   const unsigned int alignment = 4096; /* always page-aligned */
   unsigned long size;
   drm_intel_bo *bo;

   switch (tiling) {
   case INTEL_TILING_X:
      if (pitch % 512)
         return NULL;
      break;
   case INTEL_TILING_Y:
      if (pitch % 128)
         return NULL;
      break;
   default:
      break;
   }

   if (pitch > ULONG_MAX / height)
      return NULL;

   size = pitch * height;

   if (for_render) {
      bo = drm_intel_bo_alloc_for_render(winsys->bufmgr,
            name, size, alignment);
   }
   else {
      bo = drm_intel_bo_alloc(winsys->bufmgr, name, size, alignment);
   }

   if (bo && tiling != INTEL_TILING_NONE) {
      uint32_t real_tiling = tiling;
      int err;

      err = drm_intel_bo_set_tiling(bo, &real_tiling, pitch);
      if (err || real_tiling != tiling) {
         assert(!"tiling mismatch");
         drm_intel_bo_unreference(bo);
         return NULL;
      }
   }

   return (struct intel_bo *) bo;
}

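/*
 * Wrap an externally shared buffer: either a flink name
 * (DRM_API_HANDLE_TYPE_SHARED) or a dma-buf fd (DRM_API_HANDLE_TYPE_FD).
 * The actual tiling mode is queried from the kernel and returned to the
 * caller along with the pitch recorded in the handle.
 */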
struct intel_bo *
intel_winsys_import_handle(struct intel_winsys *winsys,
                           const char *name,
                           const struct winsys_handle *handle,
                           unsigned long height,
                           enum intel_tiling_mode *tiling,
                           unsigned long *pitch)
{
   uint32_t real_tiling, swizzle;
   drm_intel_bo *bo;
   int err;

   switch (handle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      {
         const uint32_t gem_name = handle->handle;
         bo = drm_intel_bo_gem_create_from_name(winsys->bufmgr,
               name, gem_name);
      }
      break;
   case DRM_API_HANDLE_TYPE_FD:
      {
         const int fd = (int) handle->handle;
         bo = drm_intel_bo_gem_create_from_prime(winsys->bufmgr,
               fd, height * handle->stride);
      }
      break;
   default:
      bo = NULL;
      break;
   }

   if (!bo)
      return NULL;

   err = drm_intel_bo_get_tiling(bo, &real_tiling, &swizzle);
   if (err) {
      drm_intel_bo_unreference(bo);
      return NULL;
   }

   *tiling = real_tiling;
   *pitch = handle->stride;

   return (struct intel_bo *) bo;
}

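/*
 * Export a bo as a flink name, a raw GEM handle (for KMS), or a dma-buf fd,
 * depending on handle->type.  The pitch is recorded in handle->stride.
 */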
int
intel_winsys_export_handle(struct intel_winsys *winsys,
                           struct intel_bo *bo,
                           enum intel_tiling_mode tiling,
                           unsigned long pitch,
                           unsigned long height,
                           struct winsys_handle *handle)
{
   int err = 0;

   switch (handle->type) {
   case DRM_API_HANDLE_TYPE_SHARED:
      {
         uint32_t name;

         err = drm_intel_bo_flink(gem_bo(bo), &name);
         if (!err)
            handle->handle = name;
      }
      break;
   case DRM_API_HANDLE_TYPE_KMS:
      handle->handle = gem_bo(bo)->handle;
      break;
   case DRM_API_HANDLE_TYPE_FD:
      {
         int fd;

         err = drm_intel_bo_gem_export_to_prime(gem_bo(bo), &fd);
         if (!err)
            handle->handle = fd;
      }
      break;
   default:
      err = -EINVAL;
      break;
   }

   if (err)
      return err;

   handle->stride = pitch;

   return 0;
}

bool
intel_winsys_can_submit_bo(struct intel_winsys *winsys,
                           struct intel_bo **bo_array,
                           int count)
{
   return !drm_intel_bufmgr_check_aperture_space((drm_intel_bo **) bo_array,
                                                 count);
}

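/*
 * Execute a batch bo.  The ring and the submit flags are combined into the
 * execution flags.  A logical context is honored only on the render ring;
 * on other rings the batch is executed without a context.
 */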
int
intel_winsys_submit_bo(struct intel_winsys *winsys,
                       enum intel_ring_type ring,
                       struct intel_bo *bo, int used,
                       struct intel_context *ctx,
                       unsigned long flags)
{
   const unsigned long exec_flags = (unsigned long) ring | flags;

   /* logical contexts are only available for the render ring */
   if (ring != INTEL_RING_RENDER)
      ctx = NULL;

   if (ctx) {
      return drm_intel_gem_bo_context_exec(gem_bo(bo),
            (drm_intel_context *) ctx, used, exec_flags);
   }
   else {
      return drm_intel_bo_mrb_exec(gem_bo(bo),
            used, NULL, 0, 0, exec_flags);
   }
}

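/*
 * Disassemble the first "used" bytes of a batch bo to stderr using libdrm's
 * batch decoder.  The decoder is created lazily and shared, so access to it
 * is serialized with the winsys mutex.
 */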
void
intel_winsys_decode_bo(struct intel_winsys *winsys,
                       struct intel_bo *bo, int used)
{
   void *ptr;

   ptr = intel_bo_map(bo, false);
   if (!ptr) {
      debug_printf("failed to map buffer for decoding\n");
      return;
   }

   pipe_mutex_lock(winsys->mutex);

   if (!winsys->decode) {
      winsys->decode = drm_intel_decode_context_alloc(winsys->info.devid);
      if (!winsys->decode) {
         pipe_mutex_unlock(winsys->mutex);
         intel_bo_unmap(bo);
         return;
      }

      /* debug_printf()/debug_error() use stderr by default */
      drm_intel_decode_set_output_file(winsys->decode, stderr);
   }

   /* in dwords */
   used /= 4;

   drm_intel_decode_set_batch_pointer(winsys->decode,
         ptr, gem_bo(bo)->offset64, used);

   drm_intel_decode(winsys->decode);

   pipe_mutex_unlock(winsys->mutex);

   intel_bo_unmap(bo);
}

void
intel_bo_reference(struct intel_bo *bo)
{
   drm_intel_bo_reference(gem_bo(bo));
}

void
intel_bo_unreference(struct intel_bo *bo)
{
   drm_intel_bo_unreference(gem_bo(bo));
}

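/*
 * The three map variants below wrap drm_intel_bo_map(),
 * drm_intel_gem_bo_map_gtt(), and drm_intel_gem_bo_map_unsynchronized().
 * The first two synchronize with the GPU before returning; the
 * unsynchronized variant does not, leaving synchronization to the caller.
 * The GTT mapping goes through the aperture, so tiled layouts appear
 * linear to the CPU.
 */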
void *
intel_bo_map(struct intel_bo *bo, bool write_enable)
{
   int err;

   err = drm_intel_bo_map(gem_bo(bo), write_enable);
   if (err) {
      debug_error("failed to map bo");
      return NULL;
   }

   return gem_bo(bo)->virtual;
}

void *
intel_bo_map_gtt(struct intel_bo *bo)
{
   int err;

   err = drm_intel_gem_bo_map_gtt(gem_bo(bo));
   if (err) {
      debug_error("failed to map bo");
      return NULL;
   }

   return gem_bo(bo)->virtual;
}

void *
intel_bo_map_unsynchronized(struct intel_bo *bo)
{
   int err;

   err = drm_intel_gem_bo_map_unsynchronized(gem_bo(bo));
   if (err) {
      debug_error("failed to map bo");
      return NULL;
   }

   return gem_bo(bo)->virtual;
}

void
intel_bo_unmap(struct intel_bo *bo)
{
   int err;

   err = drm_intel_bo_unmap(gem_bo(bo));
   assert(!err);
}

int
intel_bo_pwrite(struct intel_bo *bo, unsigned long offset,
                unsigned long size, const void *data)
{
   return drm_intel_bo_subdata(gem_bo(bo), offset, size, data);
}

int
intel_bo_pread(struct intel_bo *bo, unsigned long offset,
               unsigned long size, void *data)
{
   return drm_intel_bo_get_subdata(gem_bo(bo), offset, size, data);
}

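/*
 * Add a relocation entry for the given offset in the batch bo and return
 * the presumed address of the target through *presumed_offset, so that the
 * command stream can be filled in before the kernel fixes it up at
 * execution time.
 */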
int
intel_bo_add_reloc(struct intel_bo *bo, uint32_t offset,
                   struct intel_bo *target_bo, uint32_t target_offset,
                   uint32_t read_domains, uint32_t write_domain,
                   uint64_t *presumed_offset)
{
   int err;

   err = drm_intel_bo_emit_reloc(gem_bo(bo), offset,
         gem_bo(target_bo), target_offset,
         read_domains, write_domain);

   *presumed_offset = gem_bo(target_bo)->offset64 + target_offset;

   return err;
}

int
intel_bo_get_reloc_count(struct intel_bo *bo)
{
   return drm_intel_gem_bo_get_reloc_count(gem_bo(bo));
}

void
intel_bo_truncate_relocs(struct intel_bo *bo, int start)
{
   drm_intel_gem_bo_clear_relocs(gem_bo(bo), start);
}

bool
intel_bo_has_reloc(struct intel_bo *bo, struct intel_bo *target_bo)
{
   return drm_intel_bo_references(gem_bo(bo), gem_bo(target_bo));
}

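/*
 * Wait for the bo to become idle.  The timeout is passed straight to
 * drm_intel_gem_bo_wait(), which takes nanoseconds and waits indefinitely
 * when the timeout is negative.  -ETIME is returned if the bo is still
 * busy; any other error is treated as the bo being idle.
 */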
int
intel_bo_wait(struct intel_bo *bo, int64_t timeout)
{
   int err;

   err = drm_intel_gem_bo_wait(gem_bo(bo), timeout);
   /* consider the bo idle on errors */
   if (err && err != -ETIME)
      err = 0;

   return err;
}