1 /**********************************************************
2 * Copyright 2009-2015 VMware, Inc. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 **********************************************************/
28 * SVGA buffer manager for Guest Memory Regions (GMRs).
30 * GMRs are used for pixel and vertex data upload/download to/from the virtual
31 * SVGA hardware. There is a limited number of GMRs available, and
32 * creating/destroying them is also a slow operation so we must suballocate
35 * This file implements a pipebuffer library's buffer manager, so that we can
36 * use pipepbuffer's suballocation, fencing, and debugging facilities with GMRs.
38 * @author Jose Fonseca <jfonseca@vmware.com>
44 #include "util/u_inlines.h"
45 #include "util/u_memory.h"
46 #include "pipebuffer/pb_buffer.h"
47 #include "pipebuffer/pb_bufmgr.h"
49 #include "svga_winsys.h"
51 #include "vmw_screen.h"
52 #include "vmw_buffer.h"
/* Forward declaration of the GMR buffer manager; its definition appears
 * further down in this file. */
54 struct vmw_gmr_bufmgr
;
/* NOTE(review): the fields below belong to a `struct vmw_gmr_buffer`
 * whose opening/closing braces were lost in extraction.  `base` embeds
 * the generic pipebuffer object (must stay first so pointer casts work),
 * `mgr` points back at the owning manager, and `region` is the kernel
 * GMR region backing the storage.  Later code also dereferences
 * buf->map and buf->map_flags, so the full struct presumably carries
 * those fields too — confirm against the original file. */
59 struct pb_buffer base
;
61 struct vmw_gmr_bufmgr
*mgr
;
63 struct vmw_region
*region
;
/* Shared vtbl instance (defined later in this file); also used as a
 * runtime type tag by the vmw_gmr_buffer() cast helper. */
69 extern const struct pb_vtbl vmw_gmr_buffer_vtbl
;
72 static inline struct vmw_gmr_buffer
*
73 vmw_gmr_buffer(struct pb_buffer
*buf
)
76 assert(buf
->vtbl
== &vmw_gmr_buffer_vtbl
);
77 return (struct vmw_gmr_buffer
*)buf
;
83 struct pb_manager base
;
85 struct vmw_winsys_screen
*vws
;
89 static inline struct vmw_gmr_bufmgr
*
90 vmw_gmr_bufmgr(struct pb_manager
*mgr
)
94 /* Make sure our extra flags don't collide with pipebuffer's flags */
95 STATIC_ASSERT((VMW_BUFFER_USAGE_SHARED
& PB_USAGE_ALL
) == 0);
96 STATIC_ASSERT((VMW_BUFFER_USAGE_SYNC
& PB_USAGE_ALL
) == 0);
98 return (struct vmw_gmr_bufmgr
*)mgr
;
103 vmw_gmr_buffer_destroy(struct pb_buffer
*_buf
)
105 struct vmw_gmr_buffer
*buf
= vmw_gmr_buffer(_buf
);
107 vmw_ioctl_region_unmap(buf
->region
);
109 vmw_ioctl_region_destroy(buf
->region
);
/* pb_vtbl::map callback — FRAGMENT.  Extraction dropped the return
 * type, opening brace, error-handling and return paths.  What remains
 * shows: map the GMR region into the CPU address space, then, for
 * buffers whose usage has VMW_BUFFER_USAGE_SYNC set and whose map is
 * not PB_USAGE_UNSYNCHRONIZED, issue a syncforcpu ioctl honouring the
 * DONTBLOCK flag and whether the map is read-only.  Confirm the lost
 * lines against the original file before relying on this. */
116 vmw_gmr_buffer_map(struct pb_buffer
*_buf
,
117 enum pb_usage_flags flags
,
120 struct vmw_gmr_buffer
*buf
= vmw_gmr_buffer(_buf
);
/* Map the kernel region; result cached in buf->map. */
124 buf
->map
= vmw_ioctl_region_map(buf
->region
);
/* CPU synchronization only for SYNC-usage buffers when the caller did
 * not request an unsynchronized map. */
130 if ((_buf
->usage
& VMW_BUFFER_USAGE_SYNC
) &&
131 !(flags
& PB_USAGE_UNSYNCHRONIZED
)) {
132 ret
= vmw_ioctl_syncforcpu(buf
->region
,
/* !! normalizes the DONTBLOCK bit to 0/1 for the ioctl argument. */
133 !!(flags
& PB_USAGE_DONTBLOCK
),
/* Third arg: true when the map is read-only (no CPU_WRITE). */
134 !(flags
& PB_USAGE_CPU_WRITE
),
/* pb_vtbl::unmap callback — FRAGMENT.  Extraction dropped the return
 * type, opening brace, the releasefromcpu trailing argument(s) and the
 * function's closing lines.  Visible logic: mirror of the map path —
 * if the buffer has VMW_BUFFER_USAGE_SYNC usage and the recorded map
 * flags were not PB_USAGE_UNSYNCHRONIZED, release the CPU hold taken
 * by syncforcpu in vmw_gmr_buffer_map(). */
145 vmw_gmr_buffer_unmap(struct pb_buffer
*_buf
)
147 struct vmw_gmr_buffer
*buf
= vmw_gmr_buffer(_buf
);
/* Re-use the flags recorded at map time, not caller-supplied ones. */
148 enum pb_usage_flags flags
= buf
->map_flags
;
150 if ((_buf
->usage
& VMW_BUFFER_USAGE_SYNC
) &&
151 !(flags
& PB_USAGE_UNSYNCHRONIZED
)) {
152 vmw_ioctl_releasefromcpu(buf
->region
,
/* Second arg: true when the map was read-only (no CPU_WRITE). */
153 !(flags
& PB_USAGE_CPU_WRITE
),
/* pb_vtbl::get_base_buffer callback — SIGNATURE FRAGMENT only.  The
 * return type, the offset out-parameter and the whole body were lost
 * in extraction.  Presumably it reports `buf` itself as the base
 * buffer with offset 0, as GMR buffers are not suballocated here —
 * TODO confirm against the original file. */
160 vmw_gmr_buffer_get_base_buffer(struct pb_buffer
*buf
,
161 struct pb_buffer
**base_buf
,
/* pb_vtbl::validate callback — SIGNATURE FRAGMENT only; the body was
 * lost in extraction.  Presumably a no-op returning PIPE_OK since the
 * companion fence callback below does nothing either — TODO confirm. */
169 static enum pipe_error
170 vmw_gmr_buffer_validate( struct pb_buffer
*_buf
,
171 struct pb_validate
*vl
,
172 enum pb_usage_flags flags
)
/**
 * pb_vtbl::fence callback — intentionally a no-op.
 * (Reassembled: the `static void` return-type line and the braces were
 * lost in extraction; the visible body is only the comment below.)
 */
static void
vmw_gmr_buffer_fence( struct pb_buffer *_buf,
                      struct pipe_fence_handle *fence )
{
   /* We don't need to do anything, as the pipebuffer library
    * will take care of delaying the destruction of fenced buffers */
}
188 const struct pb_vtbl vmw_gmr_buffer_vtbl
= {
189 vmw_gmr_buffer_destroy
,
191 vmw_gmr_buffer_unmap
,
192 vmw_gmr_buffer_validate
,
193 vmw_gmr_buffer_fence
,
194 vmw_gmr_buffer_get_base_buffer
/* pb_manager::create_buffer callback — FRAGMENT.  Extraction dropped
 * the `size` parameter line, the opening brace, all allocation-failure
 * checks, the tail of both branches of the SHARED test, and the return
 * statement.  Visible logic: allocate a wrapper, initialize the
 * embedded pb_buffer, then either adopt a caller-supplied region
 * (VMW_BUFFER_USAGE_SHARED) or create a fresh GMR region. */
198 static struct pb_buffer
*
199 vmw_gmr_bufmgr_create_buffer(struct pb_manager
*_mgr
,
201 const struct pb_desc
*pb_desc
)
203 struct vmw_gmr_bufmgr
*mgr
= vmw_gmr_bufmgr(_mgr
);
204 struct vmw_winsys_screen
*vws
= mgr
->vws
;
205 struct vmw_gmr_buffer
*buf
;
/* The generic pb_desc is actually a larger vmw_buffer_desc that may
 * carry a pre-existing region for the SHARED case below. */
206 const struct vmw_buffer_desc
*desc
=
207 (const struct vmw_buffer_desc
*) pb_desc
;
/* Zero-initialized wrapper; NOTE(review): the NULL check after this
 * CALLOC was lost in extraction. */
209 buf
= CALLOC_STRUCT(vmw_gmr_buffer
);
213 pipe_reference_init(&buf
->base
.reference
, 1);
214 buf
->base
.alignment
= pb_desc
->alignment
;
/* SHARED is a winsys-private flag; strip it before handing the usage
 * bits to the generic pipebuffer code. */
215 buf
->base
.usage
= pb_desc
->usage
& ~VMW_BUFFER_USAGE_SHARED
;
216 buf
->base
.vtbl
= &vmw_gmr_buffer_vtbl
;
218 buf
->base
.size
= size
;
/* Shared buffer: adopt the region supplied by the caller instead of
 * creating a new one. */
219 if ((pb_desc
->usage
& VMW_BUFFER_USAGE_SHARED
) && desc
->region
) {
220 buf
->region
= desc
->region
;
/* else-branch (its `} else {` line was lost): create a fresh region.
 * NOTE(review): the failure check on region creation is also missing
 * from this view. */
222 buf
->region
= vmw_ioctl_region_create(vws
, size
);
/* pb_manager::flush callback — SIGNATURE FRAGMENT only; the return
 * type and body were lost in extraction.  Presumably a no-op, as GMR
 * buffers need no manager-level flushing — TODO confirm. */
236 vmw_gmr_bufmgr_flush(struct pb_manager
*mgr
)
/* pb_manager::destroy callback — FRAGMENT.  The return type, opening
 * brace and the body after the cast (presumably FREE(mgr) and the
 * closing brace) were lost in extraction — TODO confirm. */
243 vmw_gmr_bufmgr_destroy(struct pb_manager
*_mgr
)
245 struct vmw_gmr_bufmgr
*mgr
= vmw_gmr_bufmgr(_mgr
);
/* Public constructor for the GMR buffer manager — FRAGMENT.  The
 * return type, opening brace, the NULL check after CALLOC_STRUCT, the
 * `mgr->vws = vws` assignment and the return statement were all lost
 * in extraction.  Visible logic: allocate the manager and wire its
 * pb_manager callbacks to the static functions in this file. */
251 vmw_gmr_bufmgr_create(struct vmw_winsys_screen
*vws
)
253 struct vmw_gmr_bufmgr
*mgr
;
255 mgr
= CALLOC_STRUCT(vmw_gmr_bufmgr
);
259 mgr
->base
.destroy
= vmw_gmr_bufmgr_destroy
;
260 mgr
->base
.create_buffer
= vmw_gmr_bufmgr_create_buffer
;
261 mgr
->base
.flush
= vmw_gmr_bufmgr_flush
;
/* Translate a (possibly suballocated) pb_buffer into an SVGAGuestPtr —
 * FRAGMENT.  The return type, opening brace, the `offset` variable
 * declaration, any validity check on the base buffer, and the return
 * statement were lost in extraction.  Visible logic: resolve the
 * suballocation to its base buffer + offset, take the base region's
 * guest pointer, and add the suballocation offset. */
270 vmw_gmr_bufmgr_region_ptr(struct pb_buffer
*buf
,
271 struct SVGAGuestPtr
*ptr
)
273 struct pb_buffer
*base_buf
;
275 struct vmw_gmr_buffer
*gmr_buf
;
/* Walk down to the underlying (non-suballocated) buffer. */
277 pb_get_base_buffer( buf
, &base_buf
, &offset
);
279 gmr_buf
= vmw_gmr_buffer(base_buf
);
283 *ptr
= vmw_ioctl_region_ptr(gmr_buf
->region
);
/* Adjust for this buffer's placement within the base region. */
285 ptr
->offset
+= offset
;
/**
 * Wrapper the svga winsys hands to the driver: pairs a pb_buffer with a
 * debug-flush tracking object.  (Reassembled: only the closing brace
 * was lost in extraction; in the upstream file `fbuf` may sit under an
 * #ifdef DEBUG guard — confirm against the original.)
 */
struct svga_winsys_buffer
{
   struct pb_buffer *pb_buf;      /* the actual storage */
   struct debug_flush_buf *fbuf;  /* map/flush debugging state */
};
297 vmw_pb_buffer(struct svga_winsys_buffer
*buffer
)
300 return buffer
->pb_buf
;
/* Wrap a pb_buffer into a svga_winsys_buffer — FRAGMENT.  Extraction
 * dropped the opening brace, the NULL checks on `buffer` and on the
 * CALLOC result, the branch structure around the pb_reference release,
 * and the return statement.  Visible logic: allocate the wrapper,
 * store the pb_buffer (taking over the caller's reference), and create
 * the debug-flush tracking object. */
303 struct svga_winsys_buffer
*
304 vmw_svga_winsys_buffer_wrap(struct pb_buffer
*buffer
)
306 struct svga_winsys_buffer
*buf
;
311 buf
= CALLOC_STRUCT(svga_winsys_buffer
);
/* NOTE(review): this release is presumably the allocation-failure
 * path (drop the caller's reference when the wrapper can't be
 * allocated); the surrounding if/return was lost — confirm. */
313 pb_reference(&buffer
, NULL
);
317 buf
->pb_buf
= buffer
;
/* Track maps/flushes of this buffer, capturing call stacks. */
318 buf
->fbuf
= debug_flush_buf_create(FALSE
, VMW_DEBUG_FLUSH_STACK
);
322 struct debug_flush_buf
*
323 vmw_debug_flush_buf(struct svga_winsys_buffer
*buffer
)
331 vmw_svga_winsys_buffer_destroy(struct svga_winsys_screen
*sws
,
332 struct svga_winsys_buffer
*buf
)
334 struct pb_buffer
*pbuf
= vmw_pb_buffer(buf
);
336 pb_reference(&pbuf
, NULL
);
338 debug_flush_buf_reference(&buf
->fbuf
, NULL
);
/* Map a winsys buffer for CPU access — FRAGMENT.  Extraction dropped
 * the return type (presumably `void *`), the opening brace, the `map`
 * variable declaration, any retry/error handling around pb_map, and
 * the return statement.  Visible logic: sanitize the flag combination,
 * statically prove PIPE_TRANSFER_x and PB_USAGE_x bit values coincide,
 * map through pipebuffer, and record the map for flush debugging. */
344 vmw_svga_winsys_buffer_map(struct svga_winsys_screen
*sws
,
345 struct svga_winsys_buffer
*buf
,
346 enum pipe_transfer_usage flags
)
/* DONTBLOCK is meaningless for an unsynchronized map; drop it. */
351 if (flags
& PIPE_TRANSFER_UNSYNCHRONIZED
)
352 flags
&= ~PIPE_TRANSFER_DONTBLOCK
;
354 /* NOTE: we're passing PIPE_TRANSFER_x flags instead of
355 * PB_USAGE_x flags here. We should probably fix that.
/* Compile-time guarantees that the bit-for-bit reinterpretation of the
 * flags below is sound. */
357 STATIC_ASSERT((unsigned) PB_USAGE_CPU_READ
==
358 (unsigned) PIPE_TRANSFER_READ
);
359 STATIC_ASSERT((unsigned) PB_USAGE_CPU_WRITE
==
360 (unsigned) PIPE_TRANSFER_WRITE
);
361 STATIC_ASSERT((unsigned) PB_USAGE_GPU_READ
==
362 (unsigned) PIPE_TRANSFER_MAP_DIRECTLY
);
363 STATIC_ASSERT((unsigned) PB_USAGE_DONTBLOCK
==
364 (unsigned) PIPE_TRANSFER_DONTBLOCK
);
365 STATIC_ASSERT((unsigned) PB_USAGE_UNSYNCHRONIZED
==
366 (unsigned) PIPE_TRANSFER_UNSYNCHRONIZED
);
/* Mask to the bits pipebuffer understands before mapping. */
368 map
= pb_map(vmw_pb_buffer(buf
), flags
& PB_USAGE_ALL
, NULL
);
/* Record the map for flush-debugging (likely conditional on a
 * successful map — the surrounding check was lost in extraction). */
372 debug_flush_map(buf
->fbuf
, flags
);
/* Unmap a winsys buffer — FRAGMENT.  The return type (presumably
 * `void`), opening brace and the function's closing lines were lost in
 * extraction (the definition also runs past the last visible line).
 * Visible logic: mirror of the map path — record the unmap for flush
 * debugging, then unmap through pipebuffer.  `sws` appears unused. */
380 vmw_svga_winsys_buffer_unmap(struct svga_winsys_screen
*sws
,
381 struct svga_winsys_buffer
*buf
)
386 debug_flush_unmap(buf
->fbuf
);
389 pb_unmap(vmw_pb_buffer(buf
));