svga/drm: Flush preemptively at 1/3 of the aperture.
[mesa.git] / src / gallium / winsys / svga / drm / vmw_context.c
/**********************************************************
 * Copyright 2009 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/


#include "svga_cmd.h"

#include "util/u_debug.h"
#include "util/u_memory.h"
#include "util/u_debug_stack.h"
#include "pipebuffer/pb_buffer.h"
#include "pipebuffer/pb_validate.h"

#include "svga_winsys.h"
#include "vmw_context.h"
#include "vmw_screen.h"
#include "vmw_buffer.h"
#include "vmw_surface.h"
#include "vmw_fence.h"

#define VMW_COMMAND_SIZE (64*1024)
#define VMW_SURFACE_RELOCS (1024)
#define VMW_REGION_RELOCS (512)

#define VMW_MUST_FLUSH_STACK 8

struct vmw_region_relocation
{
   struct SVGAGuestPtr *where;
   struct pb_buffer *buffer;
   /* TODO: put offset info inside where */
   uint32 offset;
};

struct vmw_svga_winsys_context
{
   struct svga_winsys_context base;

   struct vmw_winsys_screen *vws;

#ifdef DEBUG
   boolean must_flush;
   struct debug_stack_frame must_flush_stack[VMW_MUST_FLUSH_STACK];
#endif

   struct {
      uint8_t buffer[VMW_COMMAND_SIZE];
      uint32_t size;
      uint32_t used;
      uint32_t reserved;
   } command;

   struct {
      struct vmw_svga_winsys_surface *handles[VMW_SURFACE_RELOCS];
      uint32_t size;
      uint32_t used;
      uint32_t staged;
      uint32_t reserved;
   } surface;

   struct {
      struct vmw_region_relocation relocs[VMW_REGION_RELOCS];
      uint32_t size;
      uint32_t used;
      uint32_t staged;
      uint32_t reserved;
   } region;

   struct pb_validate *validate;

   uint32_t last_fence;

   /**
    * The amount of GMR memory referred to by the commands currently batched
    * in the context.
    */
   uint32_t seen_regions;

   /**
    * Whether this context should fail to reserve more commands: not because
    * it ran out of command space, but because a substantial amount of GMR
    * memory is already referred to by the batched commands.
    */
   boolean preemptive_flush;

   boolean throttle_set;
   uint32_t throttle_us;
};


static INLINE struct vmw_svga_winsys_context *
vmw_svga_winsys_context(struct svga_winsys_context *swc)
{
   assert(swc);
   return (struct vmw_svga_winsys_context *)swc;
}


static INLINE unsigned
vmw_translate_to_pb_flags(unsigned flags)
{
   unsigned f = 0;
   if (flags & SVGA_RELOC_READ)
      f |= PB_USAGE_GPU_READ;

   if (flags & SVGA_RELOC_WRITE)
      f |= PB_USAGE_GPU_WRITE;

   return f;
}

static enum pipe_error
vmw_swc_flush(struct svga_winsys_context *swc,
              struct pipe_fence_handle **pfence)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct pipe_fence_handle *fence = NULL;
   unsigned i;
   enum pipe_error ret;
   uint32_t throttle_us;

   ret = pb_validate_validate(vswc->validate);
   assert(ret == PIPE_OK);
   if(ret == PIPE_OK) {

      /* Apply relocations */
      for(i = 0; i < vswc->region.used; ++i) {
         struct vmw_region_relocation *reloc = &vswc->region.relocs[i];
         struct SVGAGuestPtr ptr;

         if(!vmw_gmr_bufmgr_region_ptr(reloc->buffer, &ptr))
            assert(0);

         ptr.offset += reloc->offset;

         *reloc->where = ptr;
      }

      throttle_us = vswc->throttle_set ?
         vswc->throttle_us : vswc->vws->default_throttle_us;

      if (vswc->command.used)
         vmw_ioctl_command(vswc->vws,
                           vswc->base.cid,
                           throttle_us,
                           vswc->command.buffer,
                           vswc->command.used,
                           &vswc->last_fence);

      fence = vmw_pipe_fence(vswc->last_fence);

      pb_validate_fence(vswc->validate, fence);
   }

   vswc->command.used = 0;
   vswc->command.reserved = 0;

   for(i = 0; i < vswc->surface.used + vswc->surface.staged; ++i) {
      struct vmw_svga_winsys_surface *vsurf =
         vswc->surface.handles[i];
      p_atomic_dec(&vsurf->validated);
      vmw_svga_winsys_surface_reference(&vswc->surface.handles[i], NULL);
   }

   vswc->surface.used = 0;
   vswc->surface.reserved = 0;

   for(i = 0; i < vswc->region.used + vswc->region.staged; ++i) {
      pb_reference(&vswc->region.relocs[i].buffer, NULL);
   }

   vswc->region.used = 0;
   vswc->region.reserved = 0;

#ifdef DEBUG
   vswc->must_flush = FALSE;
#endif
   vswc->preemptive_flush = FALSE;
   vswc->seen_regions = 0;

   if(pfence)
      *pfence = fence;

   return ret;
}
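
/*
 * A minimal caller-side sketch of the flush/fence contract above.  This is
 * illustration only (hence the #if 0): it assumes the fence_finish and
 * fence_reference hooks and the SVGA_FENCE_FLAG_EXEC flag declared in
 * svga_winsys.h, and the helper name is made up; it is not part of this
 * winsys.
 */
#if 0
static void
example_flush_and_sync(struct svga_winsys_screen *sws,
                       struct svga_winsys_context *swc)
{
   struct pipe_fence_handle *fence = NULL;

   /* Submit whatever has been committed so far and get a fence back. */
   swc->flush(swc, &fence);

   /* Wait for the host to finish executing the submitted commands. */
   if (fence) {
      sws->fence_finish(sws, fence, SVGA_FENCE_FLAG_EXEC);
      sws->fence_reference(sws, &fence, NULL);
   }
}
#endif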


static void *
vmw_swc_reserve(struct svga_winsys_context *swc,
                uint32_t nr_bytes, uint32_t nr_relocs )
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

#ifdef DEBUG
   /* Check if somebody forgot to check the previous failure */
   if(vswc->must_flush) {
      debug_printf("Forgot to flush:\n");
      debug_backtrace_dump(vswc->must_flush_stack, VMW_MUST_FLUSH_STACK);
      assert(!vswc->must_flush);
   }
#endif

   assert(nr_bytes <= vswc->command.size);
   if(nr_bytes > vswc->command.size)
      return NULL;

   if(vswc->preemptive_flush ||
      vswc->command.used + nr_bytes > vswc->command.size ||
      vswc->surface.used + nr_relocs > vswc->surface.size ||
      vswc->region.used + nr_relocs > vswc->region.size) {
#ifdef DEBUG
      vswc->must_flush = TRUE;
      debug_backtrace_capture(vswc->must_flush_stack, 1,
                              VMW_MUST_FLUSH_STACK);
#endif
      return NULL;
   }

   assert(vswc->command.used + nr_bytes <= vswc->command.size);
   assert(vswc->surface.used + nr_relocs <= vswc->surface.size);
   assert(vswc->region.used + nr_relocs <= vswc->region.size);

   vswc->command.reserved = nr_bytes;
   vswc->surface.reserved = nr_relocs;
   vswc->surface.staged = 0;
   vswc->region.reserved = nr_relocs;
   vswc->region.staged = 0;

   return vswc->command.buffer + vswc->command.used;
}


static void
vmw_swc_surface_relocation(struct svga_winsys_context *swc,
                           uint32 *where,
                           struct svga_winsys_surface *surface,
                           unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_svga_winsys_surface *vsurf;

   if(!surface) {
      *where = SVGA3D_INVALID_ID;
      return;
   }

   assert(vswc->surface.staged < vswc->surface.reserved);

   vsurf = vmw_svga_winsys_surface(surface);

   *where = vsurf->sid;

   vmw_svga_winsys_surface_reference(&vswc->surface.handles[vswc->surface.used + vswc->surface.staged], vsurf);
   p_atomic_inc(&vsurf->validated);
   ++vswc->surface.staged;
}


static void
vmw_swc_region_relocation(struct svga_winsys_context *swc,
                          struct SVGAGuestPtr *where,
                          struct svga_winsys_buffer *buffer,
                          uint32 offset,
                          unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_region_relocation *reloc;
   unsigned translated_flags;
   enum pipe_error ret;

   assert(vswc->region.staged < vswc->region.reserved);

   reloc = &vswc->region.relocs[vswc->region.used + vswc->region.staged];
   reloc->where = where;
   pb_reference(&reloc->buffer, vmw_pb_buffer(buffer));
   reloc->offset = offset;

   ++vswc->region.staged;

   translated_flags = vmw_translate_to_pb_flags(flags);
   ret = pb_validate_add_buffer(vswc->validate, reloc->buffer, translated_flags);
   /* TODO: Update pipebuffer to reserve buffers and not fail here */
   assert(ret == PIPE_OK);
   (void)ret;  /* avoid a set-but-unused warning when asserts are compiled out */

   /*
    * Preemptively flush the FIFO commands to keep the GMR working set within
    * the GMR pool size.
    *
    * This is necessary for applications like SPECviewperf that generate huge
    * amounts of immediate vertex data, so that we don't pile up too much of
    * that vertex data in either the guest or the host.
    *
    * Note that in the current implementation, if a region is referred to
    * twice in a command stream it is accounted for twice.  We could detect
    * repeated regions and count each only once, but there is little incentive
    * to do so, since regions are typically short-lived and referred to by a
    * single command; at worst we just flush the commands a bit sooner, which
    * is not a performance issue for the SVGA virtual device, since flushing
    * commands to the FIFO does not cause a flush in the host.
    */
   vswc->seen_regions += reloc->buffer->base.size;
   if(vswc->seen_regions >= VMW_GMR_POOL_SIZE/3)
      vswc->preemptive_flush = TRUE;
}
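
/*
 * For a rough sense of scale (hypothetical numbers, not taken from this
 * driver): with a 16 MB GMR pool the threshold above works out to
 * 16 MB / 3 ~= 5.3 MB, i.e. roughly 85 immediate-mode vertex uploads of
 * 64 KiB each can be referred to before preemptive_flush is set and the
 * next vmw_swc_reserve() call returns NULL, forcing the caller to flush.
 * The real pool size comes from VMW_GMR_POOL_SIZE in vmw_screen.h.
 */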


static void
vmw_swc_commit(struct svga_winsys_context *swc)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

   assert(vswc->command.reserved);
   assert(vswc->command.used + vswc->command.reserved <= vswc->command.size);
   vswc->command.used += vswc->command.reserved;
   vswc->command.reserved = 0;

   assert(vswc->surface.staged <= vswc->surface.reserved);
   assert(vswc->surface.used + vswc->surface.staged <= vswc->surface.size);
   vswc->surface.used += vswc->surface.staged;
   vswc->surface.staged = 0;
   vswc->surface.reserved = 0;

   assert(vswc->region.staged <= vswc->region.reserved);
   assert(vswc->region.used + vswc->region.staged <= vswc->region.size);
   vswc->region.used += vswc->region.staged;
   vswc->region.staged = 0;
   vswc->region.reserved = 0;
}
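
/*
 * A minimal sketch of the reserve/relocation/commit cycle as seen from a
 * hypothetical caller (the real callers live in svga_cmd.c).  Illustration
 * only: the function name, command layout and retry-once policy are
 * assumptions, not part of this winsys.
 */
#if 0
static enum pipe_error
example_emit_command(struct svga_winsys_context *swc,
                     struct svga_winsys_surface *surf,
                     uint32_t nr_bytes)
{
   uint32 *cmd;

   /* reserve() returns NULL when the command buffer or relocation tables
    * are full, or when a preemptive flush is pending; the caller is
    * expected to flush and try once more. */
   cmd = swc->reserve(swc, nr_bytes, 1);
   if (!cmd) {
      swc->flush(swc, NULL);
      cmd = swc->reserve(swc, nr_bytes, 1);
      if (!cmd)
         return PIPE_ERROR_OUT_OF_MEMORY;
   }

   /* Fill in the command; any surface id field is patched through a
    * relocation so the surface stays referenced until the next flush. */
   swc->surface_relocation(swc, &cmd[0], surf, SVGA_RELOC_READ);

   /* Make the reserved bytes and staged relocations part of the batch. */
   swc->commit(swc);
   return PIPE_OK;
}
#endif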


static void
vmw_swc_destroy(struct svga_winsys_context *swc)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   unsigned i;

   for(i = 0; i < vswc->region.used; ++i) {
      pb_reference(&vswc->region.relocs[i].buffer, NULL);
   }

   for(i = 0; i < vswc->surface.used; ++i) {
      p_atomic_dec(&vswc->surface.handles[i]->validated);
      vmw_svga_winsys_surface_reference(&vswc->surface.handles[i], NULL);
   }
   pb_validate_destroy(vswc->validate);
   vmw_ioctl_context_destroy(vswc->vws, swc->cid);
   FREE(vswc);
}


struct svga_winsys_context *
vmw_svga_winsys_context_create(struct svga_winsys_screen *sws)
{
   struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
   struct vmw_svga_winsys_context *vswc;

   vswc = CALLOC_STRUCT(vmw_svga_winsys_context);
   if(!vswc)
      return NULL;

   vswc->base.destroy = vmw_swc_destroy;
   vswc->base.reserve = vmw_swc_reserve;
   vswc->base.surface_relocation = vmw_swc_surface_relocation;
   vswc->base.region_relocation = vmw_swc_region_relocation;
   vswc->base.commit = vmw_swc_commit;
   vswc->base.flush = vmw_swc_flush;

   vswc->base.cid = vmw_ioctl_context_create(vws);

   vswc->vws = vws;

   vswc->command.size = VMW_COMMAND_SIZE;
   vswc->surface.size = VMW_SURFACE_RELOCS;
   vswc->region.size = VMW_REGION_RELOCS;

   vswc->validate = pb_validate_create();
   if(!vswc->validate) {
      FREE(vswc);
      return NULL;
   }

   return &vswc->base;
}


void
vmw_svga_context_set_throttling(struct pipe_context *pipe,
                                uint32_t throttle_us)
{
   struct svga_winsys_context *swc = svga_winsys_context(pipe);
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

   vswc->throttle_us = throttle_us;
   vswc->throttle_set = TRUE;
}
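
/*
 * Hypothetical call site for the throttling hook above (illustration only):
 * ask the kernel to pace command submission to roughly one frame per 16.7 ms.
 */
#if 0
vmw_svga_context_set_throttling(pipe, 16667 /* microseconds */);
#endif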