/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/
#include "svga_cmd.h"

#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "os/os_thread.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "svga_context.h"
#include "svga_screen.h"
#include "svga_resource_buffer.h"
#include "svga_resource_buffer_upload.h"
#include "svga_winsys.h"
#include "svga_debug.h"


/**
 * Vertex and index buffers need hardware backing.  Constant buffers
 * do not.  No other types of buffers are currently supported.
 */
static INLINE boolean
svga_buffer_needs_hw_storage(unsigned usage)
{
   return usage & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER);
}
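
/*
 * Note: constant buffers can stay in a plain malloc'ed buffer because shader
 * constants are sent to the host inline in the command stream, rather than
 * DMA'ed from a hardware buffer.
 */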


/**
 * Create a buffer transfer.
 *
 * Unlike texture DMAs (which are written immediately to the command buffer and
 * therefore inherently serialized with other context operations), for buffers
 * we try to coalesce multiple range mappings (i.e., multiple calls to this
 * function) into a single DMA command, for better efficiency in command
 * processing.  This means we need to exercise extra care here to ensure that
 * the end result is exactly the same as if one DMA was used for every mapped
 * range.
 */
static void *
svga_buffer_transfer_map(struct pipe_context *pipe,
                         struct pipe_resource *resource,
                         unsigned level,
                         unsigned usage,
                         const struct pipe_box *box,
                         struct pipe_transfer **ptransfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_buffer *sbuf = svga_buffer(resource);
   struct pipe_transfer *transfer;
   uint8_t *map = NULL;

   transfer = CALLOC_STRUCT(pipe_transfer);
   if (transfer == NULL) {
      return NULL;
   }

   transfer->resource = resource;
   transfer->level = level;
   transfer->usage = usage;
   transfer->box = *box;

   if (usage & PIPE_TRANSFER_WRITE) {
      if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
         /*
          * Flush any pending primitives, finish writing any pending DMA
          * commands, and tell the host to discard the buffer contents on
          * the next DMA operation.
          */

         svga_hwtnl_flush_buffer(svga, resource);

         if (sbuf->dma.pending) {
            svga_buffer_upload_flush(svga, sbuf);

            /*
             * Instead of flushing the context command buffer, simply discard
             * the current hwbuf, and start a new one.
             */

            svga_buffer_destroy_hw_storage(ss, sbuf);
         }

         sbuf->map.num_ranges = 0;
         sbuf->dma.flags.discard = TRUE;
      }

      if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
         if (!sbuf->map.num_ranges) {
            /*
             * No pending ranges to upload so far, so we can tell the host to
             * not synchronize on the next DMA command.
             */

            sbuf->dma.flags.unsynchronized = TRUE;
         }
      }
      else {
         /*
          * Synchronizing, so flush any pending primitives, finish writing any
          * pending DMA command, and ensure the next DMA will be done in order.
          */

         svga_hwtnl_flush_buffer(svga, resource);

         if (sbuf->dma.pending) {
            svga_buffer_upload_flush(svga, sbuf);

            if (sbuf->hwbuf) {
               /*
                * We have a pending DMA upload from a hardware buffer, therefore
                * we need to ensure that the host finishes processing that DMA
                * command before the state tracker can start overwriting the
                * hardware buffer.
                *
                * XXX: This could be avoided by tying the hardware buffer to
                * the transfer (just as done with textures), which would allow
                * overlapping DMA commands to be queued on the same context
                * buffer.  However, due to the likelihood of software vertex
                * processing, it is more convenient to hold on to the hardware
                * buffer, allowing to quickly access the contents from the CPU
                * without having to do a DMA download from the host.
                */

               if (usage & PIPE_TRANSFER_DONTBLOCK) {
                  /*
                   * Flushing the command buffer here will most likely cause
                   * the map of the hwbuf below to block, so preemptively
                   * return NULL here if DONTBLOCK is set to prevent unnecessary
                   * command buffer flushes.
                   */

                  FREE(transfer);
                  return NULL;
               }

               svga_context_flush(svga, NULL);
            }
         }

         sbuf->dma.flags.unsynchronized = FALSE;
      }
   }

   if (!sbuf->swbuf && !sbuf->hwbuf) {
      if (svga_buffer_create_hw_storage(ss, sbuf) != PIPE_OK) {
         /*
          * We can't create a hardware buffer big enough, so create a malloc
          * buffer instead.
          */
         if (0) {
            debug_printf("%s: failed to allocate %u KB of DMA, "
                         "splitting DMA transfers\n",
                         __FUNCTION__,
                         (sbuf->b.b.width0 + 1023)/1024);
         }

         sbuf->swbuf = align_malloc(sbuf->b.b.width0, 16);
         if (!sbuf->swbuf) {
            FREE(transfer);
            return NULL;
         }
      }
   }

   if (sbuf->swbuf) {
      /* User/malloc buffer */
      map = sbuf->swbuf;
   }
   else if (sbuf->hwbuf) {
      struct svga_winsys_screen *sws = ss->sws;

      map = sws->buffer_map(sws, sbuf->hwbuf, transfer->usage);
   }

   if (map == NULL) {
      FREE(transfer);
      return NULL;
   }

   ++sbuf->map.count;
   map += transfer->box.x;
   *ptransfer = transfer;

   return map;
}
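
/*
 * Summary of the PIPE_TRANSFER_WRITE flag handling above:
 *
 *   DISCARD_WHOLE_RESOURCE   drop all pending ranges and the current hwbuf,
 *                            and ask the host to discard on the next DMA;
 *   UNSYNCHRONIZED           skip host synchronization, but only while no
 *                            ranges are already pending upload;
 *   neither                  serialize: flush pending primitives and DMA so
 *                            the next DMA happens in order;
 *   DONTBLOCK                fail the map rather than flush the context and
 *                            block on the hardware buffer.
 */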


static void
svga_buffer_transfer_flush_region( struct pipe_context *pipe,
                                   struct pipe_transfer *transfer,
                                   const struct pipe_box *box)
{
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_buffer *sbuf = svga_buffer(transfer->resource);

   unsigned offset = transfer->box.x + box->x;
   unsigned length = box->width;

   assert(transfer->usage & PIPE_TRANSFER_WRITE);
   assert(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT);

   pipe_mutex_lock(ss->swc_mutex);
   svga_buffer_add_range(sbuf, offset, offset + length);
   pipe_mutex_unlock(ss->swc_mutex);
}
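
/*
 * The ranges recorded by svga_buffer_add_range() accumulate in sbuf->map and
 * are emitted as a single DMA command when the buffer is next needed by the
 * hardware (see svga_resource_buffer_upload.c).
 */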


static void
svga_buffer_transfer_unmap( struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_buffer *sbuf = svga_buffer(transfer->resource);

   pipe_mutex_lock(ss->swc_mutex);

   assert(sbuf->map.count);
   if (sbuf->map.count) {
      --sbuf->map.count;
   }

   if (sbuf->hwbuf) {
      sws->buffer_unmap(sws, sbuf->hwbuf);
   }

   if (transfer->usage & PIPE_TRANSFER_WRITE) {
      if (!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
         /*
          * Mapped range not flushed explicitly, so flush the whole buffer,
          * and tell the host to discard the contents when processing the DMA
          * command.
          */

         SVGA_DBG(DEBUG_DMA, "flushing the whole buffer\n");

         sbuf->dma.flags.discard = TRUE;

         svga_buffer_add_range(sbuf, 0, sbuf->b.b.width0);
      }
   }

   pipe_mutex_unlock(ss->swc_mutex);
   FREE(transfer);
}
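
/*
 * Illustrative sketch (disabled; not part of the driver): the
 * map / flush_region / unmap protocol implemented above.  The helper name,
 * the 64-byte ranges, and the data layout are invented for the example;
 * u_box_1d() (util/u_box.h) and memcpy() (<string.h>) are assumed available.
 * Both flushed ranges accumulate in sbuf->map and get coalesced into a
 * single DMA command.
 */
#if 0
static void
example_explicit_flush_writes(struct pipe_context *pipe,
                              struct pipe_resource *buf,
                              const uint8_t *data)
{
   struct pipe_transfer *transfer;
   struct pipe_box box, range;
   uint8_t *map;

   /* Map the whole buffer, promising to flush written ranges explicitly. */
   u_box_1d(0, buf->width0, &box);
   map = svga_buffer_transfer_map(pipe, buf, 0,
                                  PIPE_TRANSFER_WRITE |
                                  PIPE_TRANSFER_FLUSH_EXPLICIT,
                                  &box, &transfer);
   if (!map)
      return;

   /* Write and flush two disjoint subranges; each becomes a dirty range. */
   memcpy(map, data, 64);
   u_box_1d(0, 64, &range);
   svga_buffer_transfer_flush_region(pipe, transfer, &range);

   memcpy(map + 128, data + 64, 64);
   u_box_1d(128, 64, &range);
   svga_buffer_transfer_flush_region(pipe, transfer, &range);

   svga_buffer_transfer_unmap(pipe, transfer);
}
#endif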


static void
svga_buffer_destroy( struct pipe_screen *screen,
                     struct pipe_resource *buf )
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf = svga_buffer( buf );

   assert(!p_atomic_read(&buf->reference.count));

   assert(!sbuf->dma.pending);

   if (sbuf->handle)
      svga_buffer_destroy_host_surface(ss, sbuf);

   if (sbuf->uploaded.buffer)
      pipe_resource_reference(&sbuf->uploaded.buffer, NULL);

   if (sbuf->hwbuf)
      svga_buffer_destroy_hw_storage(ss, sbuf);

   if (sbuf->swbuf && !sbuf->user)
      align_free(sbuf->swbuf);

   FREE(sbuf);
}
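

/*
 * Callback table used by the u_resource helpers: the generic
 * pipe_screen/pipe_context entrypoints look up the resource's vtbl and
 * dispatch to the buffer implementations above.
 */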
struct u_resource_vtbl svga_buffer_vtbl =
{
   u_default_resource_get_handle,      /* get_handle */
   svga_buffer_destroy,                /* resource_destroy */
   svga_buffer_transfer_map,           /* transfer_map */
   svga_buffer_transfer_flush_region,  /* transfer_flush_region */
   svga_buffer_transfer_unmap,         /* transfer_unmap */
   u_default_transfer_inline_write     /* transfer_inline_write */
};


struct pipe_resource *
svga_buffer_create(struct pipe_screen *screen,
                   const struct pipe_resource *template)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;

   sbuf = CALLOC_STRUCT(svga_buffer);
   if (!sbuf)
      goto error1;

   sbuf->b.b = *template;
   sbuf->b.vtbl = &svga_buffer_vtbl;
   pipe_reference_init(&sbuf->b.b.reference, 1);
   sbuf->b.b.screen = screen;

   if (svga_buffer_needs_hw_storage(template->bind)) {
      if (svga_buffer_create_host_surface(ss, sbuf) != PIPE_OK)
         goto error2;
   }
   else {
      sbuf->swbuf = align_malloc(template->width0, 64);
      if (!sbuf->swbuf)
         goto error2;
   }

   debug_reference(&sbuf->b.b.reference,
                   (debug_reference_descriptor)debug_describe_resource, 0);

   return &sbuf->b.b;

error2:
   FREE(sbuf);
error1:
   return NULL;
}
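
/*
 * Illustrative sketch (disabled; not part of the driver): a resource
 * template that reaches svga_buffer_create() above through the screen's
 * resource_create hook.  The helper name and sizes are invented; memset()
 * (<string.h>) is assumed available.  PIPE_BIND_VERTEX_BUFFER makes
 * svga_buffer_needs_hw_storage() request a host surface.
 */
#if 0
static struct pipe_resource *
example_create_vertex_buffer(struct pipe_screen *screen)
{
   struct pipe_resource templ;

   memset(&templ, 0, sizeof(templ));
   templ.target = PIPE_BUFFER;
   templ.format = PIPE_FORMAT_R8_UNORM;
   templ.bind = PIPE_BIND_VERTEX_BUFFER;
   templ.usage = PIPE_USAGE_DEFAULT;
   templ.width0 = 64 * 1024;
   templ.height0 = 1;
   templ.depth0 = 1;
   templ.array_size = 1;

   return screen->resource_create(screen, &templ);
}
#endif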


struct pipe_resource *
svga_user_buffer_create(struct pipe_screen *screen,
                        void *ptr,
                        unsigned bytes,
                        unsigned bind)
{
   struct svga_buffer *sbuf;

   sbuf = CALLOC_STRUCT(svga_buffer);
   if (!sbuf)
      goto no_sbuf;

   pipe_reference_init(&sbuf->b.b.reference, 1);
   sbuf->b.vtbl = &svga_buffer_vtbl;
   sbuf->b.b.screen = screen;
   sbuf->b.b.format = PIPE_FORMAT_R8_UNORM; /* ?? */
   sbuf->b.b.usage = PIPE_USAGE_IMMUTABLE;
   sbuf->b.b.bind = bind;
   sbuf->b.b.width0 = bytes;
   sbuf->b.b.height0 = 1;
   sbuf->b.b.depth0 = 1;
   sbuf->b.b.array_size = 1;

   sbuf->swbuf = ptr;
   sbuf->user = TRUE;

   debug_reference(&sbuf->b.b.reference,
                   (debug_reference_descriptor)debug_describe_resource, 0);

   return &sbuf->b.b;

no_sbuf:
   return NULL;
}
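
/*
 * Illustrative sketch (disabled; not part of the driver): wrapping
 * application memory with svga_user_buffer_create().  The array is an
 * example; no copy is made (sbuf->swbuf just aliases the pointer), so the
 * memory must outlive the resource, and sbuf->user keeps the destructor
 * from freeing it.
 */
#if 0
static struct pipe_resource *
example_wrap_user_memory(struct pipe_screen *screen)
{
   static float vertices[12];   /* must outlive the resource */

   return svga_user_buffer_create(screen, vertices, sizeof(vertices),
                                  PIPE_BIND_VERTEX_BUFFER);
}
#endif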