Merge remote-tracking branch 'mesa-public/master' into vulkan
[mesa.git] / src / gallium / drivers / nouveau / nouveau_fence.c
1 /*
2 * Copyright 2010 Christoph Bumiller
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
21 */
22
23 #include "nouveau_screen.h"
24 #include "nouveau_winsys.h"
25 #include "nouveau_fence.h"
26 #include "os/os_time.h"
27
28 #ifdef PIPE_OS_UNIX
29 #include <sched.h>
30 #endif
31
32 bool
33 nouveau_fence_new(struct nouveau_screen *screen, struct nouveau_fence **fence,
34 bool emit)
35 {
36 *fence = CALLOC_STRUCT(nouveau_fence);
37 if (!*fence)
38 return false;
39
40 (*fence)->screen = screen;
41 (*fence)->ref = 1;
42 LIST_INITHEAD(&(*fence)->work);
43
44 if (emit)
45 nouveau_fence_emit(*fence);
46
47 return true;
48 }
49
50 static void
51 nouveau_fence_trigger_work(struct nouveau_fence *fence)
52 {
53 struct nouveau_fence_work *work, *tmp;
54
55 LIST_FOR_EACH_ENTRY_SAFE(work, tmp, &fence->work, list) {
56 work->func(work->data);
57 LIST_DEL(&work->list);
58 FREE(work);
59 }
60 }
61
62 void
63 nouveau_fence_emit(struct nouveau_fence *fence)
64 {
65 struct nouveau_screen *screen = fence->screen;
66
67 assert(fence->state == NOUVEAU_FENCE_STATE_AVAILABLE);
68
69 /* set this now, so that if fence.emit triggers a flush we don't recurse */
70 fence->state = NOUVEAU_FENCE_STATE_EMITTING;
71
72 ++fence->ref;
73
74 if (screen->fence.tail)
75 screen->fence.tail->next = fence;
76 else
77 screen->fence.head = fence;
78
79 screen->fence.tail = fence;
80
81 screen->fence.emit(&screen->base, &fence->sequence);
82
83 assert(fence->state == NOUVEAU_FENCE_STATE_EMITTING);
84 fence->state = NOUVEAU_FENCE_STATE_EMITTED;
85 }
86
87 void
88 nouveau_fence_del(struct nouveau_fence *fence)
89 {
90 struct nouveau_fence *it;
91 struct nouveau_screen *screen = fence->screen;
92
93 if (fence->state == NOUVEAU_FENCE_STATE_EMITTED ||
94 fence->state == NOUVEAU_FENCE_STATE_FLUSHED) {
95 if (fence == screen->fence.head) {
96 screen->fence.head = fence->next;
97 if (!screen->fence.head)
98 screen->fence.tail = NULL;
99 } else {
100 for (it = screen->fence.head; it && it->next != fence; it = it->next);
101 it->next = fence->next;
102 if (screen->fence.tail == fence)
103 screen->fence.tail = it;
104 }
105 }
106
107 if (!LIST_IS_EMPTY(&fence->work)) {
108 debug_printf("WARNING: deleting fence with work still pending !\n");
109 nouveau_fence_trigger_work(fence);
110 }
111
112 FREE(fence);
113 }
114
/* Advance fence states based on the hardware sequence counter.
 *
 * Reads the current sequence via screen->fence.update and, if it advanced,
 * walks the emitted-fence list (which is in emission order), marking each
 * fence SIGNALLED, running its deferred work and dropping the list's
 * reference, until the fence whose sequence matches the ack is reached.
 * If @flushed is set, the remaining fences in EMITTED state are promoted
 * to FLUSHED.
 */
void
nouveau_fence_update(struct nouveau_screen *screen, bool flushed)
{
   struct nouveau_fence *fence;
   struct nouveau_fence *next = NULL;
   u32 sequence = screen->fence.update(&screen->base);

   /* nothing new signalled since last check */
   if (screen->fence.sequence_ack == sequence)
      return;
   screen->fence.sequence_ack = sequence;

   for (fence = screen->fence.head; fence; fence = next) {
      next = fence->next;
      sequence = fence->sequence;

      fence->state = NOUVEAU_FENCE_STATE_SIGNALLED;

      nouveau_fence_trigger_work(fence);
      /* drop the reference taken when the fence was put on the list
       * in nouveau_fence_emit() */
      nouveau_fence_ref(NULL, &fence);

      /* stop once we reach the last fence covered by the ack */
      if (sequence == screen->fence.sequence_ack)
         break;
   }
   /* everything up to (and including) the acked fence is off the list */
   screen->fence.head = next;
   if (!next)
      screen->fence.tail = NULL;

   if (flushed) {
      for (fence = next; fence; fence = fence->next)
         if (fence->state == NOUVEAU_FENCE_STATE_EMITTED)
            fence->state = NOUVEAU_FENCE_STATE_FLUSHED;
   }
}
148
149 #define NOUVEAU_FENCE_MAX_SPINS (1 << 31)
150
151 bool
152 nouveau_fence_signalled(struct nouveau_fence *fence)
153 {
154 struct nouveau_screen *screen = fence->screen;
155
156 if (fence->state == NOUVEAU_FENCE_STATE_SIGNALLED)
157 return true;
158
159 if (fence->state >= NOUVEAU_FENCE_STATE_EMITTED)
160 nouveau_fence_update(screen, false);
161
162 return fence->state == NOUVEAU_FENCE_STATE_SIGNALLED;
163 }
164
/* Make sure @fence can eventually signal: emit it if it has not been
 * emitted yet, and submit the pushbuf to the kernel if the fence has not
 * been flushed.  Returns false if the pushbuf submission fails.
 */
static bool
nouveau_fence_kick(struct nouveau_fence *fence)
{
   struct nouveau_screen *screen = fence->screen;

   /* wtf, someone is waiting on a fence in flush_notify handler? */
   assert(fence->state != NOUVEAU_FENCE_STATE_EMITTING);

   if (fence->state < NOUVEAU_FENCE_STATE_EMITTED) {
      PUSH_SPACE(screen->pushbuf, 8);
      /* The space allocation might trigger a flush, which could emit the
       * current fence. So check again.
       */
      if (fence->state < NOUVEAU_FENCE_STATE_EMITTED)
         nouveau_fence_emit(fence);
   }

   if (fence->state < NOUVEAU_FENCE_STATE_FLUSHED)
      if (nouveau_pushbuf_kick(screen->pushbuf, screen->pushbuf->channel))
         return false;

   /* the screen's current fence was just pushed out; rotate to a new one
    * so subsequent work is tracked by its own fence */
   if (fence == screen->fence.current)
      nouveau_fence_next(screen);

   nouveau_fence_update(screen, false);

   return true;
}
193
194 bool
195 nouveau_fence_wait(struct nouveau_fence *fence, struct pipe_debug_callback *debug)
196 {
197 struct nouveau_screen *screen = fence->screen;
198 uint32_t spins = 0;
199 int64_t start = 0;
200
201 if (debug && debug->debug_message)
202 start = os_time_get_nano();
203
204 if (!nouveau_fence_kick(fence))
205 return false;
206
207 do {
208 if (fence->state == NOUVEAU_FENCE_STATE_SIGNALLED) {
209 if (debug && debug->debug_message)
210 pipe_debug_message(debug, PERF_INFO,
211 "stalled %.3f ms waiting for fence",
212 (os_time_get_nano() - start) / 1000000.f);
213 return true;
214 }
215 if (!spins)
216 NOUVEAU_DRV_STAT(screen, any_non_kernel_fence_sync_count, 1);
217 spins++;
218 #ifdef PIPE_OS_UNIX
219 if (!(spins % 8)) /* donate a few cycles */
220 sched_yield();
221 #endif
222
223 nouveau_fence_update(screen, false);
224 } while (spins < NOUVEAU_FENCE_MAX_SPINS);
225
226 debug_printf("Wait on fence %u (ack = %u, next = %u) timed out !\n",
227 fence->sequence,
228 screen->fence.sequence_ack, screen->fence.sequence);
229
230 return false;
231 }
232
233 void
234 nouveau_fence_next(struct nouveau_screen *screen)
235 {
236 if (screen->fence.current->state < NOUVEAU_FENCE_STATE_EMITTING) {
237 if (screen->fence.current->ref > 1)
238 nouveau_fence_emit(screen->fence.current);
239 else
240 return;
241 }
242
243 nouveau_fence_ref(NULL, &screen->fence.current);
244
245 nouveau_fence_new(screen, &screen->fence.current, false);
246 }
247
248 void
249 nouveau_fence_unref_bo(void *data)
250 {
251 struct nouveau_bo *bo = data;
252
253 nouveau_bo_ref(NULL, &bo);
254 }
255
256 bool
257 nouveau_fence_work(struct nouveau_fence *fence,
258 void (*func)(void *), void *data)
259 {
260 struct nouveau_fence_work *work;
261
262 if (!fence || fence->state == NOUVEAU_FENCE_STATE_SIGNALLED) {
263 func(data);
264 return true;
265 }
266
267 work = CALLOC_STRUCT(nouveau_fence_work);
268 if (!work)
269 return false;
270 work->func = func;
271 work->data = data;
272 LIST_ADD(&work->list, &fence->work);
273 p_atomic_inc(&fence->work_count);
274 if (fence->work_count > 64)
275 nouveau_fence_kick(fence);
276 return true;
277 }