/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "os/os_thread.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "svga_context.h"
#include "svga_screen.h"
#include "svga_resource_buffer.h"
#include "svga_resource_buffer_upload.h"
#include "svga_winsys.h"
#include "svga_debug.h"


/**
 * Vertex and index buffers need hardware backing.  Constant buffers
 * do not.  No other types of buffers currently supported.
 */
static INLINE boolean
svga_buffer_needs_hw_storage(unsigned usage)
{
   return usage & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER);
}
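
/*
 * svga_buffer_create() below uses this test to choose between allocating a
 * host surface (hardware backing) and falling back to a malloc'ed buffer.
 */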

/**
 * Create a buffer transfer.
 *
 * Unlike texture DMAs (which are written immediately to the command buffer and
 * therefore inherently serialized with other context operations), for buffers
 * we try to coalesce multiple range mappings (i.e., multiple calls to this
 * function) into a single DMA command, for better efficiency in command
 * processing.  This means we need to exercise extra care here to ensure that
 * the end result is exactly the same as if one DMA was used for every mapped
 * range.
 */
static struct pipe_transfer *
svga_buffer_get_transfer(struct pipe_context *pipe,
                         struct pipe_resource *resource,
                         unsigned level,
                         unsigned usage,
                         const struct pipe_box *box)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_buffer *sbuf = svga_buffer(resource);
   struct pipe_transfer *transfer;

   transfer = CALLOC_STRUCT(pipe_transfer);
   if (transfer == NULL) {
      return NULL;
   }

   transfer->resource = resource;
   transfer->level = level;
   transfer->usage = usage;
   transfer->box = *box;

   if (usage & PIPE_TRANSFER_WRITE) {
      if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
         /*
          * Finish writing any pending DMA commands, and tell the host to
          * discard the buffer contents on the next DMA operation.
          */

         if (sbuf->dma.pending) {
            svga_buffer_upload_flush(svga, sbuf);

            /*
             * Instead of flushing the context command buffer, simply discard
             * the current hwbuf, and start a new one.
             */

            svga_buffer_destroy_hw_storage(ss, sbuf);
         }

         sbuf->map.num_ranges = 0;
         sbuf->dma.flags.discard = TRUE;
      }

      if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
         if (!sbuf->map.num_ranges) {
            /*
             * No pending ranges to upload so far, so we can tell the host to
             * not synchronize on the next DMA command.
             */

            sbuf->dma.flags.unsynchronized = TRUE;
         }
      }
      else {
         /*
          * Synchronizing, so finish writing any pending DMA command, and
          * ensure the next DMA will be done in order.
          */

         if (sbuf->dma.pending) {
            svga_buffer_upload_flush(svga, sbuf);

            if (sbuf->hwbuf) {
               /*
                * We have a pending DMA upload from a hardware buffer, therefore
                * we need to ensure that the host finishes processing that DMA
                * command before the state tracker can start overwriting the
                * hardware buffer.
                *
                * XXX: This could be avoided by tying the hardware buffer to
                * the transfer (just as done with textures), which would allow
                * overlapping DMA commands to be queued on the same context
                * buffer.  However, due to the likelihood of software vertex
                * processing, it is more convenient to hold on to the hardware
                * buffer, allowing us to quickly access the contents from the
                * CPU without having to do a DMA download from the host.
                */

               if (usage & PIPE_TRANSFER_DONTBLOCK) {
                  /*
                   * Flushing the command buffer here will most likely cause
                   * the map of the hwbuf below to block, so preemptively
                   * return NULL here if DONTBLOCK is set to prevent unnecessary
                   * command buffer flushes.
                   */

                  FREE(transfer);
                  return NULL;
               }

               svga_context_flush(svga, NULL);
            }
         }

         sbuf->dma.flags.unsynchronized = FALSE;
      }
   }

   if (!sbuf->swbuf && !sbuf->hwbuf) {
      if (svga_buffer_create_hw_storage(ss, sbuf) != PIPE_OK) {
         /*
          * We can't create a hardware buffer big enough, so create a malloc
          * buffer instead.
          */

         debug_printf("%s: failed to allocate %u KB of DMA, "
                      "splitting DMA transfers\n",
                      __FUNCTION__,
                      (sbuf->b.b.width0 + 1023)/1024);

         sbuf->swbuf = align_malloc(sbuf->b.b.width0, 16);
      }
   }

   return transfer;
}


/**
 * Map a range of a buffer.
 */
static void *
svga_buffer_transfer_map( struct pipe_context *pipe,
                          struct pipe_transfer *transfer )
{
   struct svga_buffer *sbuf = svga_buffer(transfer->resource);

   uint8_t *map;

   if (sbuf->swbuf) {
      /* User/malloc buffer */
      map = sbuf->swbuf;
   }
   else if (sbuf->hwbuf) {
      struct svga_screen *ss = svga_screen(pipe->screen);
      struct svga_winsys_screen *sws = ss->sws;

      map = sws->buffer_map(sws, sbuf->hwbuf, transfer->usage);
   }
   else {
      map = NULL;
   }

   if (map) {
      ++sbuf->map.count;
      map += transfer->box.x;
   }

   return map;
}
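

/**
 * Record a modified subrange of a mapped buffer.  Only used for transfers
 * created with PIPE_TRANSFER_FLUSH_EXPLICIT; the range is accumulated so it
 * can later be uploaded to the host in a single DMA command.
 */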
static void
svga_buffer_transfer_flush_region( struct pipe_context *pipe,
                                   struct pipe_transfer *transfer,
                                   const struct pipe_box *box)
{
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_buffer *sbuf = svga_buffer(transfer->resource);

   unsigned offset = transfer->box.x + box->x;
   unsigned length = box->width;

   assert(transfer->usage & PIPE_TRANSFER_WRITE);
   assert(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT);

   pipe_mutex_lock(ss->swc_mutex);
   svga_buffer_add_range(sbuf, offset, offset + length);
   pipe_mutex_unlock(ss->swc_mutex);
}
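

/**
 * Unmap a buffer.  If the mapped range was written without
 * PIPE_TRANSFER_FLUSH_EXPLICIT, the whole buffer is marked dirty so it is
 * uploaded on the next DMA command.
 */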
static void
svga_buffer_transfer_unmap( struct pipe_context *pipe,
                            struct pipe_transfer *transfer )
{
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_buffer *sbuf = svga_buffer(transfer->resource);

   pipe_mutex_lock(ss->swc_mutex);

   assert(sbuf->map.count);
   if (sbuf->map.count) {
      --sbuf->map.count;
   }

   if (sbuf->hwbuf) {
      sws->buffer_unmap(sws, sbuf->hwbuf);
   }

   if (transfer->usage & PIPE_TRANSFER_WRITE) {
      if (!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
         /*
          * Mapped range not flushed explicitly, so flush the whole buffer,
          * and tell the host to discard the contents when processing the DMA
          * command.
          */

         SVGA_DBG(DEBUG_DMA, "flushing the whole buffer\n");

         sbuf->dma.flags.discard = TRUE;

         svga_buffer_add_range(sbuf, 0, sbuf->b.b.width0);
      }
   }

   pipe_mutex_unlock(ss->swc_mutex);
}
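

/**
 * Free the transfer object allocated in svga_buffer_get_transfer().
 */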
static void
svga_buffer_transfer_destroy(struct pipe_context *pipe,
                             struct pipe_transfer *transfer)
{
   FREE(transfer);
}
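

/**
 * Release all storage associated with a buffer resource (host surface,
 * hardware buffer, malloc'ed buffer).  Called once the reference count
 * has dropped to zero.
 */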
static void
svga_buffer_destroy( struct pipe_screen *screen,
                     struct pipe_resource *buf )
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf = svga_buffer( buf );

   assert(!p_atomic_read(&buf->reference.count));

   assert(!sbuf->dma.pending);

   if(sbuf->handle)
      svga_buffer_destroy_host_surface(ss, sbuf);

   if(sbuf->uploaded.buffer)
      pipe_resource_reference(&sbuf->uploaded.buffer, NULL);

   if(sbuf->hwbuf)
      svga_buffer_destroy_hw_storage(ss, sbuf);

   if(sbuf->swbuf && !sbuf->user)
      align_free(sbuf->swbuf);

   FREE(sbuf);
}
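

/**
 * Resource function table for SVGA buffers, plugged into sbuf->b.vtbl by
 * the create functions below so that the generic u_resource entry points
 * dispatch to the implementations in this file.
 */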
struct u_resource_vtbl svga_buffer_vtbl =
{
   u_default_resource_get_handle,        /* get_handle */
   svga_buffer_destroy,                  /* resource_destroy */
   svga_buffer_get_transfer,             /* get_transfer */
   svga_buffer_transfer_destroy,         /* transfer_destroy */
   svga_buffer_transfer_map,             /* transfer_map */
   svga_buffer_transfer_flush_region,    /* transfer_flush_region */
   svga_buffer_transfer_unmap,           /* transfer_unmap */
   u_default_transfer_inline_write       /* transfer_inline_write */
};
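

/**
 * Create a buffer resource, choosing hardware or malloc'ed backing
 * according to svga_buffer_needs_hw_storage().
 */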
struct pipe_resource *
svga_buffer_create(struct pipe_screen *screen,
                   const struct pipe_resource *template)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;

   sbuf = CALLOC_STRUCT(svga_buffer);
   if(!sbuf)
      goto error1;

   sbuf->b.b = *template;
   sbuf->b.vtbl = &svga_buffer_vtbl;
   pipe_reference_init(&sbuf->b.b.reference, 1);
   sbuf->b.b.screen = screen;

   if(svga_buffer_needs_hw_storage(template->bind)) {
      if(svga_buffer_create_host_surface(ss, sbuf) != PIPE_OK)
         goto error2;
   }
   else {
      sbuf->swbuf = align_malloc(template->width0, 64);
      if(!sbuf->swbuf)
         goto error2;
   }

   debug_reference(&sbuf->b.b.reference,
                   (debug_reference_descriptor)debug_describe_resource, 0);

   return &sbuf->b.b;

error2:
   FREE(sbuf);
error1:
   return NULL;
}
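

/**
 * Wrap an existing user-owned memory area in a buffer resource.  The
 * memory is not copied; sbuf->user is set so that the memory is not
 * freed when the resource is destroyed.
 */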
struct pipe_resource *
svga_user_buffer_create(struct pipe_screen *screen,
                        void *ptr,
                        unsigned bytes,
                        unsigned bind)
{
   struct svga_buffer *sbuf;

   sbuf = CALLOC_STRUCT(svga_buffer);
   if(!sbuf)
      goto no_sbuf;

   pipe_reference_init(&sbuf->b.b.reference, 1);
   sbuf->b.vtbl = &svga_buffer_vtbl;
   sbuf->b.b.screen = screen;
   sbuf->b.b.format = PIPE_FORMAT_R8_UNORM; /* ?? */
   sbuf->b.b.usage = PIPE_USAGE_IMMUTABLE;
   sbuf->b.b.bind = bind;
   sbuf->b.b.width0 = bytes;
   sbuf->b.b.height0 = 1;
   sbuf->b.b.depth0 = 1;
   sbuf->b.b.array_size = 1;

   sbuf->swbuf = ptr;
   sbuf->user = TRUE;

   debug_reference(&sbuf->b.b.reference,
                   (debug_reference_descriptor)debug_describe_resource, 0);

   return &sbuf->b.b;

no_sbuf:
   return NULL;
}
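
/*
 * Usage sketch (illustrative only, not part of this file; helper signatures
 * are approximate and varied across gallium versions): a state tracker
 * normally reaches the callbacks above through the generic helpers in
 * util/u_inlines.h rather than calling them directly, e.g.:
 *
 *    struct pipe_transfer *xfer;
 *    void *map = pipe_buffer_map(pipe, buf, PIPE_TRANSFER_WRITE, &xfer);
 *    if (map) {
 *       memcpy(map, data, size);
 *       pipe_buffer_unmap(pipe, buf, xfer);
 *    }
 *
 * u_resource_vtbl dispatches those calls to svga_buffer_get_transfer(),
 * svga_buffer_transfer_map(), svga_buffer_transfer_unmap() and
 * svga_buffer_transfer_destroy() via svga_buffer_vtbl above.
 */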