/*
 * Mesa 3-D graphics library
 *
 * Copyright (C) 2012-2014 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Chia-I Wu <olv@lunarg.com>
 */
31 #define ETIME ETIMEDOUT
36 #include <intel_bufmgr.h>
38 #include "os/os_thread.h"
39 #include "state_tracker/drm_driver.h"
40 #include "pipe/p_state.h"
41 #include "util/u_inlines.h"
42 #include "util/u_memory.h"
43 #include "util/u_debug.h"
44 #include "../intel_winsys.h"
46 #define BATCH_SZ (8192 * sizeof(uint32_t))
50 drm_intel_bufmgr
*bufmgr
;
51 struct intel_winsys_info info
;
53 /* these are protected by the mutex */
55 drm_intel_context
*first_gem_ctx
;
56 struct drm_intel_decode
*decode
;
60 gem_bo(const struct intel_bo
*bo
)
62 return (drm_intel_bo
*) bo
;
66 get_param(struct intel_winsys
*winsys
, int param
, int *value
)
68 struct drm_i915_getparam gp
;
73 memset(&gp
, 0, sizeof(gp
));
77 err
= drmCommandWriteRead(winsys
->fd
, DRM_I915_GETPARAM
, &gp
, sizeof(gp
));
87 test_address_swizzling(struct intel_winsys
*winsys
)
90 uint32_t tiling
= I915_TILING_X
, swizzle
;
93 bo
= drm_intel_bo_alloc_tiled(winsys
->bufmgr
,
94 "address swizzling test", 64, 64, 4, &tiling
, &pitch
, 0);
96 drm_intel_bo_get_tiling(bo
, &tiling
, &swizzle
);
97 drm_intel_bo_unreference(bo
);
100 swizzle
= I915_BIT_6_SWIZZLE_NONE
;
103 return (swizzle
!= I915_BIT_6_SWIZZLE_NONE
);
107 test_reg_read(struct intel_winsys
*winsys
, uint32_t reg
)
111 return !drm_intel_reg_read(winsys
->bufmgr
, reg
, &dummy
);
115 probe_winsys(struct intel_winsys
*winsys
)
117 struct intel_winsys_info
*info
= &winsys
->info
;
121 * When we need the Nth vertex from a user vertex buffer, and the vertex is
122 * uploaded to, say, the beginning of a bo, we want the first vertex in the
123 * bo to be fetched. One way to do this is to set the base address of the
126 * bo->offset64 + (vb->buffer_offset - vb->stride * N).
128 * The second term may be negative, and we need kernel support to do that.
130 * This check is taken from the classic driver. u_vbuf_upload_buffers()
131 * guarantees the term is never negative, but it is good to require a
134 get_param(winsys
, I915_PARAM_HAS_RELAXED_DELTA
, &val
);
136 debug_error("kernel 2.6.39 required");
140 info
->devid
= drm_intel_bufmgr_gem_get_devid(winsys
->bufmgr
);
142 info
->max_batch_size
= BATCH_SZ
;
144 get_param(winsys
, I915_PARAM_HAS_LLC
, &val
);
146 info
->has_address_swizzling
= test_address_swizzling(winsys
);
148 winsys
->first_gem_ctx
= drm_intel_gem_context_create(winsys
->bufmgr
);
149 info
->has_logical_context
= (winsys
->first_gem_ctx
!= NULL
);
151 get_param(winsys
, I915_PARAM_HAS_ALIASING_PPGTT
, &val
);
152 info
->has_ppgtt
= val
;
154 /* test TIMESTAMP read */
155 info
->has_timestamp
= test_reg_read(winsys
, 0x2358);
157 get_param(winsys
, I915_PARAM_HAS_GEN7_SOL_RESET
, &val
);
158 info
->has_gen7_sol_reset
= val
;
163 struct intel_winsys
*
164 intel_winsys_create_for_fd(int fd
)
166 struct intel_winsys
*winsys
;
168 winsys
= CALLOC_STRUCT(intel_winsys
);
174 winsys
->bufmgr
= drm_intel_bufmgr_gem_init(winsys
->fd
, BATCH_SZ
);
175 if (!winsys
->bufmgr
) {
176 debug_error("failed to create GEM buffer manager");
181 pipe_mutex_init(winsys
->mutex
);
183 if (!probe_winsys(winsys
)) {
184 pipe_mutex_destroy(winsys
->mutex
);
185 drm_intel_bufmgr_destroy(winsys
->bufmgr
);
191 * No need to implicitly set up a fence register for each non-linear reloc
192 * entry. When a fence register is needed for a reloc entry,
193 * drm_intel_bo_emit_reloc_fence() will be called explicitly.
195 * intel_bo_add_reloc() currently lacks "bool fenced" for this to work.
196 * But we never need a fence register on GEN4+ so we do not need to worry
199 drm_intel_bufmgr_gem_enable_fenced_relocs(winsys
->bufmgr
);
201 drm_intel_bufmgr_gem_enable_reuse(winsys
->bufmgr
);
207 intel_winsys_destroy(struct intel_winsys
*winsys
)
210 drm_intel_decode_context_free(winsys
->decode
);
212 if (winsys
->first_gem_ctx
)
213 drm_intel_gem_context_destroy(winsys
->first_gem_ctx
);
215 pipe_mutex_destroy(winsys
->mutex
);
216 drm_intel_bufmgr_destroy(winsys
->bufmgr
);
220 const struct intel_winsys_info
*
221 intel_winsys_get_info(const struct intel_winsys
*winsys
)
223 return &winsys
->info
;
226 struct intel_context
*
227 intel_winsys_create_context(struct intel_winsys
*winsys
)
229 drm_intel_context
*gem_ctx
;
231 /* try the preallocated context first */
232 pipe_mutex_lock(winsys
->mutex
);
233 gem_ctx
= winsys
->first_gem_ctx
;
234 winsys
->first_gem_ctx
= NULL
;
235 pipe_mutex_unlock(winsys
->mutex
);
238 gem_ctx
= drm_intel_gem_context_create(winsys
->bufmgr
);
240 return (struct intel_context
*) gem_ctx
;
244 intel_winsys_destroy_context(struct intel_winsys
*winsys
,
245 struct intel_context
*ctx
)
247 drm_intel_gem_context_destroy((drm_intel_context
*) ctx
);
251 intel_winsys_read_reg(struct intel_winsys
*winsys
,
252 uint32_t reg
, uint64_t *val
)
254 return drm_intel_reg_read(winsys
->bufmgr
, reg
, val
);
258 intel_winsys_alloc_bo(struct intel_winsys
*winsys
,
260 enum intel_tiling_mode tiling
,
262 unsigned long height
,
263 uint32_t initial_domain
)
265 const bool for_render
=
266 (initial_domain
& (INTEL_DOMAIN_RENDER
| INTEL_DOMAIN_INSTRUCTION
));
267 const unsigned int alignment
= 4096; /* always page-aligned */
284 if (pitch
> ULONG_MAX
/ height
)
287 size
= pitch
* height
;
290 bo
= drm_intel_bo_alloc_for_render(winsys
->bufmgr
,
291 name
, size
, alignment
);
294 bo
= drm_intel_bo_alloc(winsys
->bufmgr
, name
, size
, alignment
);
297 if (bo
&& tiling
!= INTEL_TILING_NONE
) {
298 uint32_t real_tiling
= tiling
;
301 err
= drm_intel_bo_set_tiling(bo
, &real_tiling
, pitch
);
302 if (err
|| real_tiling
!= tiling
) {
303 assert(!"tiling mismatch");
304 drm_intel_bo_unreference(bo
);
309 return (struct intel_bo
*) bo
;
313 intel_winsys_import_handle(struct intel_winsys
*winsys
,
315 const struct winsys_handle
*handle
,
316 unsigned long height
,
317 enum intel_tiling_mode
*tiling
,
318 unsigned long *pitch
)
320 uint32_t real_tiling
, swizzle
;
324 switch (handle
->type
) {
325 case DRM_API_HANDLE_TYPE_SHARED
:
327 const uint32_t gem_name
= handle
->handle
;
328 bo
= drm_intel_bo_gem_create_from_name(winsys
->bufmgr
,
332 case DRM_API_HANDLE_TYPE_FD
:
334 const int fd
= (int) handle
->handle
;
335 bo
= drm_intel_bo_gem_create_from_prime(winsys
->bufmgr
,
336 fd
, height
* handle
->stride
);
347 err
= drm_intel_bo_get_tiling(bo
, &real_tiling
, &swizzle
);
349 drm_intel_bo_unreference(bo
);
353 *tiling
= real_tiling
;
354 *pitch
= handle
->stride
;
356 return (struct intel_bo
*) bo
;
360 intel_winsys_export_handle(struct intel_winsys
*winsys
,
362 enum intel_tiling_mode tiling
,
364 unsigned long height
,
365 struct winsys_handle
*handle
)
369 switch (handle
->type
) {
370 case DRM_API_HANDLE_TYPE_SHARED
:
374 err
= drm_intel_bo_flink(gem_bo(bo
), &name
);
376 handle
->handle
= name
;
379 case DRM_API_HANDLE_TYPE_KMS
:
380 handle
->handle
= gem_bo(bo
)->handle
;
382 case DRM_API_HANDLE_TYPE_FD
:
386 err
= drm_intel_bo_gem_export_to_prime(gem_bo(bo
), &fd
);
399 handle
->stride
= pitch
;
405 intel_winsys_can_submit_bo(struct intel_winsys
*winsys
,
406 struct intel_bo
**bo_array
,
409 return !drm_intel_bufmgr_check_aperture_space((drm_intel_bo
**) bo_array
,
414 intel_winsys_submit_bo(struct intel_winsys
*winsys
,
415 enum intel_ring_type ring
,
416 struct intel_bo
*bo
, int used
,
417 struct intel_context
*ctx
,
420 const unsigned long exec_flags
= (unsigned long) ring
| flags
;
422 /* logical contexts are only available for the render ring */
423 if (ring
!= INTEL_RING_RENDER
)
427 return drm_intel_gem_bo_context_exec(gem_bo(bo
),
428 (drm_intel_context
*) ctx
, used
, exec_flags
);
431 return drm_intel_bo_mrb_exec(gem_bo(bo
),
432 used
, NULL
, 0, 0, exec_flags
);
437 intel_winsys_decode_bo(struct intel_winsys
*winsys
,
438 struct intel_bo
*bo
, int used
)
442 ptr
= intel_bo_map(bo
, false);
444 debug_printf("failed to map buffer for decoding\n");
448 pipe_mutex_lock(winsys
->mutex
);
450 if (!winsys
->decode
) {
451 winsys
->decode
= drm_intel_decode_context_alloc(winsys
->info
.devid
);
452 if (!winsys
->decode
) {
453 pipe_mutex_unlock(winsys
->mutex
);
458 /* debug_printf()/debug_error() uses stderr by default */
459 drm_intel_decode_set_output_file(winsys
->decode
, stderr
);
465 drm_intel_decode_set_batch_pointer(winsys
->decode
,
466 ptr
, gem_bo(bo
)->offset64
, used
);
468 drm_intel_decode(winsys
->decode
);
470 pipe_mutex_unlock(winsys
->mutex
);
/* Take an extra reference on the bo. */
void
intel_bo_reference(struct intel_bo *bo)
{
   drm_intel_bo_reference(gem_bo(bo));
}
/* Drop a reference on the bo; frees it when the count hits zero. */
void
intel_bo_unreference(struct intel_bo *bo)
{
   drm_intel_bo_unreference(gem_bo(bo));
}
488 intel_bo_map(struct intel_bo
*bo
, bool write_enable
)
492 err
= drm_intel_bo_map(gem_bo(bo
), write_enable
);
494 debug_error("failed to map bo");
498 return gem_bo(bo
)->virtual;
502 intel_bo_map_gtt(struct intel_bo
*bo
)
506 err
= drm_intel_gem_bo_map_gtt(gem_bo(bo
));
508 debug_error("failed to map bo");
512 return gem_bo(bo
)->virtual;
516 intel_bo_map_unsynchronized(struct intel_bo
*bo
)
520 err
= drm_intel_gem_bo_map_unsynchronized(gem_bo(bo
));
522 debug_error("failed to map bo");
526 return gem_bo(bo
)->virtual;
/* Undo any of the intel_bo_map* variants. */
void
intel_bo_unmap(struct intel_bo *bo)
{
   int err;

   err = drm_intel_bo_unmap(gem_bo(bo));
   assert(!err);
}
/* Write `size` bytes of `data` into the bo at `offset`. */
int
intel_bo_pwrite(struct intel_bo *bo, unsigned long offset,
                unsigned long size, const void *data)
{
   return drm_intel_bo_subdata(gem_bo(bo), offset, size, data);
}
/* Read `size` bytes from the bo at `offset` into `data`. */
int
intel_bo_pread(struct intel_bo *bo, unsigned long offset,
               unsigned long size, void *data)
{
   return drm_intel_bo_get_subdata(gem_bo(bo), offset, size, data);
}
553 intel_bo_add_reloc(struct intel_bo
*bo
, uint32_t offset
,
554 struct intel_bo
*target_bo
, uint32_t target_offset
,
555 uint32_t read_domains
, uint32_t write_domain
,
556 uint64_t *presumed_offset
)
560 err
= drm_intel_bo_emit_reloc(gem_bo(bo
), offset
,
561 gem_bo(target_bo
), target_offset
,
562 read_domains
, write_domain
);
564 *presumed_offset
= gem_bo(target_bo
)->offset64
+ target_offset
;
/* Number of relocation entries currently attached to the bo. */
int
intel_bo_get_reloc_count(struct intel_bo *bo)
{
   return drm_intel_gem_bo_get_reloc_count(gem_bo(bo));
}
/* Discard relocation entries from index `start` onward. */
void
intel_bo_truncate_relocs(struct intel_bo *bo, int start)
{
   drm_intel_gem_bo_clear_relocs(gem_bo(bo), start);
}
/* True when the bo (transitively) references target_bo via relocs. */
bool
intel_bo_has_reloc(struct intel_bo *bo, struct intel_bo *target_bo)
{
   return drm_intel_bo_references(gem_bo(bo), gem_bo(target_bo));
}
588 intel_bo_wait(struct intel_bo
*bo
, int64_t timeout
)
592 err
= drm_intel_gem_bo_wait(gem_bo(bo
), timeout
);
593 /* consider the bo idle on errors */
594 if (err
&& err
!= -ETIME
)