/**********************************************************
 * Copyright 2009 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "util/u_debug.h"
#include "util/u_memory.h"
#include "util/u_debug_stack.h"
#include "pipebuffer/pb_buffer.h"
#include "pipebuffer/pb_validate.h"

#include "svga_winsys.h"
#include "vmw_context.h"
#include "vmw_screen.h"
#include "vmw_buffer.h"
#include "vmw_surface.h"
#include "vmw_fence.h"

#define VMW_COMMAND_SIZE (64*1024)
#define VMW_SURFACE_RELOCS (1024)
#define VMW_REGION_RELOCS (512)

#define VMW_MUST_FLUSH_STACK 8

struct vmw_region_relocation
{
   struct SVGAGuestPtr *where;
   struct pb_buffer *buffer;
   /* TODO: put offset info inside where */
   uint32 offset;
};

struct vmw_svga_winsys_context
{
   struct svga_winsys_context base;

   struct vmw_winsys_screen *vws;

   boolean must_flush;
   struct debug_stack_frame must_flush_stack[VMW_MUST_FLUSH_STACK];

   struct {
      uint8_t buffer[VMW_COMMAND_SIZE];
      uint32_t size;
      uint32_t used;
      uint32_t reserved;
   } command;

   struct {
      struct vmw_svga_winsys_surface *handles[VMW_SURFACE_RELOCS];
      uint32_t size;
      uint32_t used;
      uint32_t staged;
      uint32_t reserved;
   } surface;

   struct {
      struct vmw_region_relocation relocs[VMW_REGION_RELOCS];
      uint32_t size;
      uint32_t used;
      uint32_t staged;
      uint32_t reserved;
   } region;

   struct pb_validate *validate;

   uint32_t last_fence;

   /**
    * The amount of GMR memory referred to by the commands currently batched
    * in the context.
    */
   uint32_t seen_regions;

   /**
    * Whether this context should fail to reserve more commands, not because it
    * ran out of command space, but because a substantial amount of GMR was
    * referred.
    */
   boolean preemptive_flush;
};
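
/*
 * Command submission follows a reserve/commit protocol: reserve() hands the
 * caller space in the command buffer and room for relocations, the
 * *_relocation() hooks stage surface and GMR region references for that
 * command, commit() makes the staged data part of the batch, and flush()
 * validates the referenced buffers, patches the guest pointers and submits
 * the batch to the kernel.
 */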

static INLINE struct vmw_svga_winsys_context *
vmw_svga_winsys_context(struct svga_winsys_context *swc)
{
   assert(swc);
   return (struct vmw_svga_winsys_context *)swc;
}

static INLINE unsigned
vmw_translate_to_pb_flags(unsigned flags)
{
   unsigned f = 0;

   if (flags & SVGA_RELOC_READ)
      f |= PB_USAGE_GPU_READ;

   if (flags & SVGA_RELOC_WRITE)
      f |= PB_USAGE_GPU_WRITE;

   return f;
}

static enum pipe_error
vmw_swc_flush(struct svga_winsys_context *swc,
              struct pipe_fence_handle **pfence)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct pipe_fence_handle *fence = NULL;
   unsigned i;
   enum pipe_error ret;

   ret = pb_validate_validate(vswc->validate);
   assert(ret == PIPE_OK);
   if(ret == PIPE_OK) {

      /* Apply relocations */
      for(i = 0; i < vswc->region.used; ++i) {
         struct vmw_region_relocation *reloc = &vswc->region.relocs[i];
         struct SVGAGuestPtr ptr;

         if(!vmw_gmr_bufmgr_region_ptr(reloc->buffer, &ptr))
            assert(0);

         ptr.offset += reloc->offset;

         *reloc->where = ptr;
      }

      if (vswc->command.used)
         vmw_ioctl_command(vswc->vws,
                           vswc->command.buffer,
                           vswc->command.used,
                           &vswc->last_fence);

      fence = vmw_pipe_fence(vswc->last_fence);

      pb_validate_fence(vswc->validate, fence);
   }

   vswc->command.used = 0;
   vswc->command.reserved = 0;

   for(i = 0; i < vswc->surface.used + vswc->surface.staged; ++i) {
      struct vmw_svga_winsys_surface *vsurf = vswc->surface.handles[i];
      p_atomic_dec(&vsurf->validated);
      vmw_svga_winsys_surface_reference(&vswc->surface.handles[i], NULL);
   }

   vswc->surface.used = 0;
   vswc->surface.reserved = 0;

   for(i = 0; i < vswc->region.used + vswc->region.staged; ++i) {
      pb_reference(&vswc->region.relocs[i].buffer, NULL);
   }

   vswc->region.used = 0;
   vswc->region.reserved = 0;

   vswc->must_flush = FALSE;
   vswc->preemptive_flush = FALSE;
   vswc->seen_regions = 0;

   if(pfence)
      *pfence = fence;

   return ret;
}
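
/*
 * Reserve nr_bytes of command space and room for nr_relocs surface and
 * region relocations. Returns a pointer into the command buffer where the
 * caller may write the command, or NULL if the caller must flush first.
 */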

static void *
vmw_swc_reserve(struct svga_winsys_context *swc,
                uint32_t nr_bytes, uint32_t nr_relocs)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

   /* Check if somebody forgot to check the previous failure */
   if(vswc->must_flush) {
      debug_printf("Forgot to flush:\n");
      debug_backtrace_dump(vswc->must_flush_stack, VMW_MUST_FLUSH_STACK);
      assert(!vswc->must_flush);
   }

   assert(nr_bytes <= vswc->command.size);
   if(nr_bytes > vswc->command.size)
      return NULL;

   if(vswc->preemptive_flush ||
      vswc->command.used + nr_bytes > vswc->command.size ||
      vswc->surface.used + nr_relocs > vswc->surface.size ||
      vswc->region.used + nr_relocs > vswc->region.size) {
      vswc->must_flush = TRUE;
      debug_backtrace_capture(vswc->must_flush_stack, 1,
                              VMW_MUST_FLUSH_STACK);
      return NULL;
   }

   assert(vswc->command.used + nr_bytes <= vswc->command.size);
   assert(vswc->surface.used + nr_relocs <= vswc->surface.size);
   assert(vswc->region.used + nr_relocs <= vswc->region.size);

   vswc->command.reserved = nr_bytes;
   vswc->surface.reserved = nr_relocs;
   vswc->surface.staged = 0;
   vswc->region.reserved = nr_relocs;
   vswc->region.staged = 0;

   return vswc->command.buffer + vswc->command.used;
}
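
/*
 * Write the surface id at *where and stage a reference to the surface so it
 * stays alive and validated until the batch is flushed.
 */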

static void
vmw_swc_surface_relocation(struct svga_winsys_context *swc,
                           uint32 *where,
                           struct svga_winsys_surface *surface,
                           unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_svga_winsys_surface *vsurf;

   if(!surface) {
      *where = SVGA3D_INVALID_ID;
      return;
   }

   assert(vswc->surface.staged < vswc->surface.reserved);

   vsurf = vmw_svga_winsys_surface(surface);

   *where = vsurf->sid;

   vmw_svga_winsys_surface_reference(&vswc->surface.handles[vswc->surface.used + vswc->surface.staged], vsurf);
   p_atomic_inc(&vsurf->validated);
   ++vswc->surface.staged;
}
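
/*
 * Record a GMR region relocation: the guest pointer at *where will be
 * patched with the buffer's actual location when the batch is flushed.
 */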

static void
vmw_swc_region_relocation(struct svga_winsys_context *swc,
                          struct SVGAGuestPtr *where,
                          struct svga_winsys_buffer *buffer,
                          uint32 offset,
                          unsigned flags)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   struct vmw_region_relocation *reloc;
   unsigned translated_flags;
   enum pipe_error ret;

   assert(vswc->region.staged < vswc->region.reserved);

   reloc = &vswc->region.relocs[vswc->region.used + vswc->region.staged];
   reloc->where = where;
   pb_reference(&reloc->buffer, vmw_pb_buffer(buffer));
   reloc->offset = offset;

   ++vswc->region.staged;

   translated_flags = vmw_translate_to_pb_flags(flags);
   ret = pb_validate_add_buffer(vswc->validate, reloc->buffer, translated_flags);
   /* TODO: Update pipebuffer to reserve buffers and not fail here */
   assert(ret == PIPE_OK);

   /*
    * Preemptively flush the FIFO commands to keep the GMR working set within
    * the GMR pool size.
    *
    * This is necessary for applications like SPECviewperf that generate huge
    * amounts of immediate vertex data, so that we don't pile up too much of
    * that vertex data either in the guest or in the host.
    *
    * Note that in the current implementation, if a region is referred to twice
    * in a command stream, it will be accounted for twice. We could detect
    * repeated regions and count each only once, but there is no incentive to
    * do that, since regions are typically short-lived and referred to in a
    * single command; at worst we just flush the commands a bit sooner, which
    * for the SVGA virtual device is not a performance issue, since flushing
    * commands to the FIFO won't cause flushing in the host.
    */
   vswc->seen_regions += reloc->buffer->base.size;
   if(vswc->seen_regions >= VMW_GMR_POOL_SIZE/2)
      vswc->preemptive_flush = TRUE;
}
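
/*
 * Commit the command bytes and relocations staged since the last reserve()
 * call, making them part of the batch submitted at the next flush.
 */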

static void
vmw_swc_commit(struct svga_winsys_context *swc)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);

   assert(vswc->command.reserved);
   assert(vswc->command.used + vswc->command.reserved <= vswc->command.size);
   vswc->command.used += vswc->command.reserved;
   vswc->command.reserved = 0;

   assert(vswc->surface.staged <= vswc->surface.reserved);
   assert(vswc->surface.used + vswc->surface.staged <= vswc->surface.size);
   vswc->surface.used += vswc->surface.staged;
   vswc->surface.staged = 0;
   vswc->surface.reserved = 0;

   assert(vswc->region.staged <= vswc->region.reserved);
   assert(vswc->region.used + vswc->region.staged <= vswc->region.size);
   vswc->region.used += vswc->region.staged;
   vswc->region.staged = 0;
   vswc->region.reserved = 0;
}
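
/*
 * Release any outstanding surface and region references, then destroy the
 * validation list and the kernel context.
 */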

static void
vmw_swc_destroy(struct svga_winsys_context *swc)
{
   struct vmw_svga_winsys_context *vswc = vmw_svga_winsys_context(swc);
   unsigned i;

   for(i = 0; i < vswc->region.used; ++i) {
      pb_reference(&vswc->region.relocs[i].buffer, NULL);
   }

   for(i = 0; i < vswc->surface.used; ++i) {
      p_atomic_dec(&vswc->surface.handles[i]->validated);
      vmw_svga_winsys_surface_reference(&vswc->surface.handles[i], NULL);
   }

   pb_validate_destroy(vswc->validate);
   vmw_ioctl_context_destroy(vswc->vws, swc->cid);
   FREE(vswc);
}

struct svga_winsys_context *
vmw_svga_winsys_context_create(struct svga_winsys_screen *sws)
{
   struct vmw_winsys_screen *vws = vmw_winsys_screen(sws);
   struct vmw_svga_winsys_context *vswc;

   vswc = CALLOC_STRUCT(vmw_svga_winsys_context);
   if(!vswc)
      return NULL;

   vswc->base.destroy = vmw_swc_destroy;
   vswc->base.reserve = vmw_swc_reserve;
   vswc->base.surface_relocation = vmw_swc_surface_relocation;
   vswc->base.region_relocation = vmw_swc_region_relocation;
   vswc->base.commit = vmw_swc_commit;
   vswc->base.flush = vmw_swc_flush;

   vswc->base.cid = vmw_ioctl_context_create(vws);

   vswc->vws = vws;

   vswc->command.size = VMW_COMMAND_SIZE;
   vswc->surface.size = VMW_SURFACE_RELOCS;
   vswc->region.size = VMW_REGION_RELOCS;

   vswc->validate = pb_validate_create();
   if(!vswc->validate) {