1 /**********************************************************
2 * Copyright 2009-2011 VMware, Inc. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
24 **********************************************************/
25 #include "util/u_memory.h"
26 #include "util/u_atomic.h"
27 #include "util/u_double_list.h"
28 #include "os/os_thread.h"
30 #include "pipebuffer/pb_buffer_fenced.h"
32 #include "vmw_screen.h"
33 #include "vmw_fence.h"
40 struct pb_fence_ops base
;
41 struct vmw_winsys_screen
*vws
;
48 struct list_head not_signaled
;
49 uint32_t last_signaled
;
50 uint32_t last_emitted
;
55 struct list_head ops_list
;
64 * vmw_fence_seq_is_signaled - Check whether a fence seqno is
67 * @ops: Pointer to a struct pb_fence_ops.
71 vmw_fence_seq_is_signaled(uint32_t seq
, uint32_t last
, uint32_t cur
)
73 return (cur
- last
<= cur
- seq
);
78 * vmw_fence_ops - Return the vmw_fence_ops structure backing a
79 * struct pb_fence_ops pointer.
81 * @ops: Pointer to a struct pb_fence_ops.
84 static INLINE
struct vmw_fence_ops
*
85 vmw_fence_ops(struct pb_fence_ops
*ops
)
88 return (struct vmw_fence_ops
*)ops
;
93 * vmw_fences_release - Release all fences from the not_signaled
96 * @ops: Pointer to a struct vmw_fence_ops.
100 vmw_fences_release(struct vmw_fence_ops
*ops
)
102 struct vmw_fence
*fence
, *n
;
104 pipe_mutex_lock(ops
->mutex
);
105 LIST_FOR_EACH_ENTRY_SAFE(fence
, n
, &ops
->not_signaled
, ops_list
)
106 LIST_DELINIT(&fence
->ops_list
);
107 pipe_mutex_unlock(ops
->mutex
);
111 * vmw_fences_signal - Traverse the not_signaled list and try to
112 * signal unsignaled fences.
114 * @ops: Pointer to a struct pb_fence_ops.
115 * @signaled: Seqno that has signaled.
116 * @emitted: Last seqno emitted by the kernel.
117 * @has_emitted: Whether we provide the emitted value.
121 vmw_fences_signal(struct pb_fence_ops
*fence_ops
,
126 struct vmw_fence_ops
*ops
= NULL
;
127 struct vmw_fence
*fence
, *n
;
129 if (fence_ops
== NULL
)
132 ops
= vmw_fence_ops(fence_ops
);
133 pipe_mutex_lock(ops
->mutex
);
136 emitted
= ops
->last_emitted
;
137 if (emitted
- signaled
> (1 << 30))
141 if (signaled
== ops
->last_signaled
&& emitted
== ops
->last_emitted
)
144 LIST_FOR_EACH_ENTRY_SAFE(fence
, n
, &ops
->not_signaled
, ops_list
) {
145 if (!vmw_fence_seq_is_signaled(fence
->seqno
, signaled
, emitted
))
148 p_atomic_set(&fence
->signalled
, 1);
149 LIST_DELINIT(&fence
->ops_list
);
151 ops
->last_signaled
= signaled
;
152 ops
->last_emitted
= emitted
;
155 pipe_mutex_unlock(ops
->mutex
);
160 * vmw_fence - return the vmw_fence object identified by a
161 * struct pipe_fence_handle *
163 * @fence: The opaque pipe fence handle.
165 static INLINE
struct vmw_fence
*
166 vmw_fence(struct pipe_fence_handle
*fence
)
168 return (struct vmw_fence
*) fence
;
173 * vmw_fence_create - Create a user-space fence object.
175 * @fence_ops: The fence_ops manager to register with.
176 * @handle: Handle identifying the kernel fence object.
177 * @mask: Mask of flags that this fence object may signal.
179 * Returns NULL on failure.
181 struct pipe_fence_handle
*
182 vmw_fence_create(struct pb_fence_ops
*fence_ops
, uint32_t handle
,
183 uint32_t seqno
, uint32_t mask
)
185 struct vmw_fence
*fence
= CALLOC_STRUCT(vmw_fence
);
186 struct vmw_fence_ops
*ops
= vmw_fence_ops(fence_ops
);
191 p_atomic_set(&fence
->refcount
, 1);
192 fence
->handle
= handle
;
194 fence
->seqno
= seqno
;
195 p_atomic_set(&fence
->signalled
, 0);
196 pipe_mutex_lock(ops
->mutex
);
198 if (vmw_fence_seq_is_signaled(seqno
, ops
->last_signaled
, seqno
)) {
199 p_atomic_set(&fence
->signalled
, 1);
200 LIST_INITHEAD(&fence
->ops_list
);
202 p_atomic_set(&fence
->signalled
, 0);
203 LIST_ADDTAIL(&fence
->ops_list
, &ops
->not_signaled
);
206 pipe_mutex_unlock(ops
->mutex
);
208 return (struct pipe_fence_handle
*) fence
;
213 * vmw_fence_reference - Reference / unreference a vmw fence object.
215 * @vws: Pointer to the winsys screen.
216 * @ptr: Pointer to reference transfer destination.
217 * @fence: Pointer to object to reference. May be NULL.
220 vmw_fence_reference(struct vmw_winsys_screen
*vws
,
221 struct pipe_fence_handle
**ptr
,
222 struct pipe_fence_handle
*fence
)
225 struct vmw_fence
*vfence
= vmw_fence(*ptr
);
227 if (p_atomic_dec_zero(&vfence
->refcount
)) {
228 struct vmw_fence_ops
*ops
= vmw_fence_ops(vws
->fence_ops
);
230 vmw_ioctl_fence_unref(vws
, vfence
->handle
);
232 pipe_mutex_lock(ops
->mutex
);
233 LIST_DELINIT(&vfence
->ops_list
);
234 pipe_mutex_unlock(ops
->mutex
);
241 struct vmw_fence
*vfence
= vmw_fence(fence
);
243 p_atomic_inc(&vfence
->refcount
);
251 * vmw_fence_signalled - Check whether a fence object is signalled.
253 * @vws: Pointer to the winsys screen.
254 * @fence: Handle to the fence object.
255 * @flag: Fence flags to check. If the fence object can't signal
256 * a flag, it is assumed to be already signaled.
258 * Returns 0 if the fence object was signaled, nonzero otherwise.
261 vmw_fence_signalled(struct vmw_winsys_screen
*vws
,
262 struct pipe_fence_handle
*fence
,
265 struct vmw_fence
*vfence
;
266 int32_t vflags
= SVGA_FENCE_FLAG_EXEC
;
273 vfence
= vmw_fence(fence
);
274 old
= p_atomic_read(&vfence
->signalled
);
276 vflags
&= ~vfence
->mask
;
278 if ((old
& vflags
) == vflags
)
282 * Currently we update signaled fences on each execbuf call.
283 * That should really be sufficient, and we can avoid
284 * a lot of kernel calls this way.
287 ret
= vmw_ioctl_fence_signalled(vws
, vfence
->handle
, vflags
);
290 p_atomic_set(&vfence
->signalled
, 1);
299 * vmw_fence_finish - Wait for a fence object to signal.
301 * @vws: Pointer to the winsys screen.
302 * @fence: Handle to the fence object.
303 * @flag: Fence flags to wait for. If the fence object can't signal
304 * a flag, it is assumed to be already signaled.
306 * Returns 0 if the wait succeeded. Nonzero otherwise.
309 vmw_fence_finish(struct vmw_winsys_screen
*vws
,
310 struct pipe_fence_handle
*fence
,
313 struct vmw_fence
*vfence
;
314 int32_t vflags
= SVGA_FENCE_FLAG_EXEC
;
321 vfence
= vmw_fence(fence
);
322 old
= p_atomic_read(&vfence
->signalled
);
323 vflags
&= ~vfence
->mask
;
325 if ((old
& vflags
) == vflags
)
328 ret
= vmw_ioctl_fence_finish(vws
, vfence
->handle
, vflags
);
335 prev
= p_atomic_cmpxchg(&vfence
->signalled
, old
, old
| vflags
);
336 } while (prev
!= old
);
344 * vmw_fence_ops_fence_reference - wrapper for the pb_fence_ops api.
346 * wrapper around vmw_fence_reference.
349 vmw_fence_ops_fence_reference(struct pb_fence_ops
*ops
,
350 struct pipe_fence_handle
**ptr
,
351 struct pipe_fence_handle
*fence
)
353 struct vmw_winsys_screen
*vws
= vmw_fence_ops(ops
)->vws
;
355 vmw_fence_reference(vws
, ptr
, fence
);
359 * vmw_fence_ops_fence_signalled - wrapper for the pb_fence_ops api.
361 * wrapper around vmw_fence_signalled.
364 vmw_fence_ops_fence_signalled(struct pb_fence_ops
*ops
,
365 struct pipe_fence_handle
*fence
,
368 struct vmw_winsys_screen
*vws
= vmw_fence_ops(ops
)->vws
;
370 return vmw_fence_signalled(vws
, fence
, flag
);
375 * vmw_fence_ops_fence_finish - wrapper for the pb_fence_ops api.
377 * wrapper around vmw_fence_finish.
380 vmw_fence_ops_fence_finish(struct pb_fence_ops
*ops
,
381 struct pipe_fence_handle
*fence
,
384 struct vmw_winsys_screen
*vws
= vmw_fence_ops(ops
)->vws
;
386 return vmw_fence_finish(vws
, fence
, flag
);
/**
 * vmw_fence_ops_destroy - Destroy a pb_fence_ops function table.
 *
 * @ops: The function table to destroy.
 *
 * Part of the pb_fence_ops api.
 */
static void
vmw_fence_ops_destroy(struct pb_fence_ops *ops)
{
   vmw_fences_release(vmw_fence_ops(ops));
   /* Fix: free the table allocated in vmw_fence_ops_create() —
    * otherwise it leaks on screen teardown.
    */
   FREE(ops);
}
406 * vmw_fence_ops_create - Create a pb_fence_ops function table.
408 * @vws: Pointer to a struct vmw_winsys_screen.
410 * Returns a pointer to a pb_fence_ops function table to interface
411 * with pipe_buffer. This function is typically called on driver setup.
413 * Returns NULL on failure.
415 struct pb_fence_ops
*
416 vmw_fence_ops_create(struct vmw_winsys_screen
*vws
)
418 struct vmw_fence_ops
*ops
;
420 ops
= CALLOC_STRUCT(vmw_fence_ops
);
424 pipe_mutex_init(ops
->mutex
);
425 LIST_INITHEAD(&ops
->not_signaled
);
426 ops
->base
.destroy
= &vmw_fence_ops_destroy
;
427 ops
->base
.fence_reference
= &vmw_fence_ops_fence_reference
;
428 ops
->base
.fence_signalled
= &vmw_fence_ops_fence_signalled
;
429 ops
->base
.fence_finish
= &vmw_fence_ops_fence_finish
;