/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT.  IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "os/os_thread.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "svga_context.h"
#include "svga_screen.h"
#include "svga_resource_buffer.h"
#include "svga_resource_buffer_upload.h"
#include "svga_winsys.h"
#include "svga_debug.h"

/**
 * Vertex and index buffers need hardware backing.  Constant buffers
 * do not.  No other types of buffers currently supported.
 */
static INLINE boolean
svga_buffer_needs_hw_storage(unsigned usage)
{
   return usage & (PIPE_BIND_VERTEX_BUFFER | PIPE_BIND_INDEX_BUFFER);
}
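
/*
 * Illustrative sketch (not part of the driver): how the bind-flag test above
 * decides between the two storage paths.  svga_buffer_create() below branches
 * the same way; the helper name here is hypothetical.
 */
#if 0
static void
example_storage_choice(const struct pipe_resource *template)
{
   if (svga_buffer_needs_hw_storage(template->bind)) {
      /* Vertex/index buffer: host surface plus DMA-backed hardware storage. */
   }
   else {
      /* Constant buffer, etc.: a plain malloc'ed buffer is sufficient. */
   }
}
#endif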

/**
 * Create a buffer transfer.
 *
 * Unlike texture DMAs (which are written immediately to the command buffer and
 * therefore inherently serialized with other context operations), for buffers
 * we try to coalesce multiple range mappings (i.e., multiple calls to this
 * function) into a single DMA command, for better efficiency in command
 * processing.  This means we need to exercise extra care here to ensure that
 * the end result is exactly the same as if one DMA was used for every mapped
 * range.
 */
static struct pipe_transfer *
svga_buffer_get_transfer(struct pipe_context *pipe,
                         struct pipe_resource *resource,
                         unsigned level,
                         unsigned usage,
                         const struct pipe_box *box)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_buffer *sbuf = svga_buffer(resource);
   struct pipe_transfer *transfer;

   transfer = CALLOC_STRUCT(pipe_transfer);
   if (transfer == NULL) {
      return NULL;
   }

   transfer->resource = resource;
   transfer->level = level;
   transfer->usage = usage;
   transfer->box = *box;

   if (usage & PIPE_TRANSFER_WRITE) {
      if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
         /*
          * Flush any pending primitives, finish writing any pending DMA
          * commands, and tell the host to discard the buffer contents on
          * the next DMA operation.
          */

         svga_hwtnl_flush_buffer(svga, resource);

         if (sbuf->dma.pending) {
            svga_buffer_upload_flush(svga, sbuf);

            /*
             * Instead of flushing the context command buffer, simply discard
             * the current hwbuf, and start a new one.
             */

            svga_buffer_destroy_hw_storage(ss, sbuf);
         }

         sbuf->map.num_ranges = 0;
         sbuf->dma.flags.discard = TRUE;
      }

      if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
         if (!sbuf->map.num_ranges) {
            /*
             * No pending ranges to upload so far, so we can tell the host to
             * not synchronize on the next DMA command.
             */

            sbuf->dma.flags.unsynchronized = TRUE;
         }
      }
      else {
         /*
          * Synchronizing, so flush any pending primitives, finish writing any
          * pending DMA command, and ensure the next DMA will be done in order.
          */

         svga_hwtnl_flush_buffer(svga, resource);

         if (sbuf->dma.pending) {
            svga_buffer_upload_flush(svga, sbuf);

            if (sbuf->hwbuf) {
               /*
                * We have a pending DMA upload from a hardware buffer, therefore
                * we need to ensure that the host finishes processing that DMA
                * command before the state tracker can start overwriting the
                * hardware buffer.
                *
                * XXX: This could be avoided by tying the hardware buffer to
                * the transfer (just as done with textures), which would allow
                * overlapping DMA commands to be queued on the same context
                * buffer.  However, due to the likelihood of software vertex
                * processing, it is more convenient to hold on to the hardware
                * buffer, allowing to quickly access the contents from the CPU
                * without having to do a DMA download from the host.
                */

               if (usage & PIPE_TRANSFER_DONTBLOCK) {
                  /*
                   * Flushing the command buffer here will most likely cause
                   * the map of the hwbuf below to block, so preemptively
                   * return NULL here if DONTBLOCK is set to prevent unnecessary
                   * command buffer flushes.
                   */

                  FREE(transfer);
                  return NULL;
               }

               svga_context_flush(svga, NULL);
            }
         }

         sbuf->dma.flags.unsynchronized = FALSE;
      }
   }

   if (!sbuf->swbuf && !sbuf->hwbuf) {
      if (svga_buffer_create_hw_storage(ss, sbuf) != PIPE_OK) {
         /*
          * We can't create a hardware buffer big enough, so create a malloc
          * buffer instead.
          */
         if (0) {
            debug_printf("%s: failed to allocate %u KB of DMA, "
                         "splitting DMA transfers\n",
                         __FUNCTION__,
                         (sbuf->b.b.width0 + 1023)/1024);
         }

         sbuf->swbuf = align_malloc(sbuf->b.b.width0, 16);
         if (!sbuf->swbuf) {
            FREE(transfer);
            return NULL;
         }
      }
   }

   return transfer;
}
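
/*
 * Illustrative sketch (not part of the driver): two disjoint writes through
 * separate transfers to the same buffer.  Each unmap records a dirty range,
 * and the driver may emit both as a single DMA command, which is the
 * coalescing described above svga_buffer_get_transfer().  Assumes the
 * pipe_context transfer hooks of this interface version, u_box_1d() from
 * "util/u_box.h" and memset() from <string.h>; the helper name is
 * hypothetical.
 */
#if 0
static void
example_coalesced_writes(struct pipe_context *pipe, struct pipe_resource *buf)
{
   struct pipe_box box;
   struct pipe_transfer *xfer;
   uint8_t *map;

   /* First write: bytes [0, 64). */
   u_box_1d(0, 64, &box);
   xfer = pipe->get_transfer(pipe, buf, 0, PIPE_TRANSFER_WRITE, &box);
   map = pipe->transfer_map(pipe, xfer);
   memset(map, 0, 64);
   pipe->transfer_unmap(pipe, xfer);
   pipe->transfer_destroy(pipe, xfer);

   /* Second write: bytes [1024, 1088); may share a DMA with the first. */
   u_box_1d(1024, 64, &box);
   xfer = pipe->get_transfer(pipe, buf, 0, PIPE_TRANSFER_WRITE, &box);
   map = pipe->transfer_map(pipe, xfer);
   memset(map, 0xff, 64);
   pipe->transfer_unmap(pipe, xfer);
   pipe->transfer_destroy(pipe, xfer);
}
#endif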

/**
 * Map a range of a buffer.
 */
static void *
svga_buffer_transfer_map( struct pipe_context *pipe,
                          struct pipe_transfer *transfer )
{
   struct svga_buffer *sbuf = svga_buffer(transfer->resource);

   uint8_t *map;

   if (sbuf->swbuf) {
      /* User/malloc buffer */
      map = sbuf->swbuf;
   }
   else if (sbuf->hwbuf) {
      struct svga_screen *ss = svga_screen(pipe->screen);
      struct svga_winsys_screen *sws = ss->sws;

      map = sws->buffer_map(sws, sbuf->hwbuf, transfer->usage);
   }
   else {
      map = NULL;
   }

   if (map) {
      ++sbuf->map.count;

      /* Return a pointer to the start of the mapped box. */
      map += transfer->box.x;
   }

   return map;
}

static void
svga_buffer_transfer_flush_region( struct pipe_context *pipe,
                                   struct pipe_transfer *transfer,
                                   const struct pipe_box *box)
{
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_buffer *sbuf = svga_buffer(transfer->resource);

   unsigned offset = transfer->box.x + box->x;
   unsigned length = box->width;

   assert(transfer->usage & PIPE_TRANSFER_WRITE);
   assert(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT);

   pipe_mutex_lock(ss->swc_mutex);
   svga_buffer_add_range(sbuf, offset, offset + length);
   pipe_mutex_unlock(ss->swc_mutex);
}
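
/*
 * Illustrative sketch (not part of the driver): with
 * PIPE_TRANSFER_FLUSH_EXPLICIT only the sub-ranges reported through
 * transfer_flush_region() reach svga_buffer_add_range(); the flush box is
 * relative to the mapped box (offset = transfer->box.x + box->x above).
 * The helper name is hypothetical.
 */
#if 0
static void
example_explicit_flush(struct pipe_context *pipe, struct pipe_resource *buf)
{
   struct pipe_box box, flush_box;
   struct pipe_transfer *xfer;
   uint8_t *map;

   /* Map bytes [256, 512) for writing, promising explicit flushes. */
   u_box_1d(256, 256, &box);
   xfer = pipe->get_transfer(pipe, buf, 0,
                             PIPE_TRANSFER_WRITE |
                             PIPE_TRANSFER_FLUSH_EXPLICIT, &box);
   map = pipe->transfer_map(pipe, xfer);

   /* Touch only 16 bytes at offset 32 within the mapped range... */
   memset(map + 32, 0, 16);

   /* ...and report exactly that: buffer bytes [288, 304) become dirty. */
   u_box_1d(32, 16, &flush_box);
   pipe->transfer_flush_region(pipe, xfer, &flush_box);

   pipe->transfer_unmap(pipe, xfer);
   pipe->transfer_destroy(pipe, xfer);
}
#endif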

static void
svga_buffer_transfer_unmap( struct pipe_context *pipe,
                            struct pipe_transfer *transfer )
{
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_buffer *sbuf = svga_buffer(transfer->resource);

   pipe_mutex_lock(ss->swc_mutex);

   assert(sbuf->map.count);
   if (sbuf->map.count) {
      --sbuf->map.count;
   }

   if (sbuf->hwbuf) {
      sws->buffer_unmap(sws, sbuf->hwbuf);
   }

   if (transfer->usage & PIPE_TRANSFER_WRITE) {
      if (!(transfer->usage & PIPE_TRANSFER_FLUSH_EXPLICIT)) {
         /*
          * Mapped range not flushed explicitly, so flush the whole buffer,
          * and tell the host to discard the contents when processing the DMA
          * command.
          */

         SVGA_DBG(DEBUG_DMA, "flushing the whole buffer\n");

         sbuf->dma.flags.discard = TRUE;

         svga_buffer_add_range(sbuf, 0, sbuf->b.b.width0);
      }
   }

   pipe_mutex_unlock(ss->swc_mutex);
}
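
/*
 * Illustrative sketch (not part of the driver): a write mapping without
 * PIPE_TRANSFER_FLUSH_EXPLICIT.  On unmap, the code above marks the entire
 * buffer dirty and sets the discard flag, so the next DMA re-uploads
 * everything even though only one byte changed.  The helper name is
 * hypothetical.
 */
#if 0
static void
example_implicit_flush(struct pipe_context *pipe, struct pipe_resource *buf)
{
   struct pipe_box box;
   struct pipe_transfer *xfer;
   uint8_t *map;

   u_box_1d(0, buf->width0, &box);
   xfer = pipe->get_transfer(pipe, buf, 0, PIPE_TRANSFER_WRITE, &box);
   map = pipe->transfer_map(pipe, xfer);
   map[0] = 0;                        /* touch a single byte...            */
   pipe->transfer_unmap(pipe, xfer);  /* ...yet the whole buffer is queued */
   pipe->transfer_destroy(pipe, xfer);
}
#endif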

static void
svga_buffer_transfer_destroy(struct pipe_context *pipe,
                             struct pipe_transfer *transfer)
{
   FREE(transfer);
}

static void
svga_buffer_destroy( struct pipe_screen *screen,
                     struct pipe_resource *buf )
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf = svga_buffer( buf );

   assert(!p_atomic_read(&buf->reference.count));

   assert(!sbuf->dma.pending);

   if (sbuf->handle)
      svga_buffer_destroy_host_surface(ss, sbuf);

   if (sbuf->uploaded.buffer)
      pipe_resource_reference(&sbuf->uploaded.buffer, NULL);

   if (sbuf->hwbuf)
      svga_buffer_destroy_hw_storage(ss, sbuf);

   if (sbuf->swbuf && !sbuf->user)
      align_free(sbuf->swbuf);

   FREE(sbuf);
}

struct u_resource_vtbl svga_buffer_vtbl =
{
   u_default_resource_get_handle,      /* get_handle */
   svga_buffer_destroy,                /* resource_destroy */
   svga_buffer_get_transfer,           /* get_transfer */
   svga_buffer_transfer_destroy,       /* transfer_destroy */
   svga_buffer_transfer_map,           /* transfer_map */
   svga_buffer_transfer_flush_region,  /* transfer_flush_region */
   svga_buffer_transfer_unmap,         /* transfer_unmap */
   u_default_transfer_inline_write     /* transfer_inline_write */
};
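
/*
 * How this table is reached (rough sketch, assuming the shared u_resource
 * helpers from util/u_transfer.c are installed on the context): each
 * pipe_context entry point recovers the vtbl from the resource and forwards,
 * approximately:
 *
 *    static void *
 *    u_transfer_map_vtbl(struct pipe_context *pipe,
 *                        struct pipe_transfer *transfer)
 *    {
 *       struct u_resource *ur = u_resource(transfer->resource);
 *       return ur->vtbl->transfer_map(pipe, transfer);
 *    }
 *
 * so the buffer-specific functions above run for buffers, while textures
 * get their own vtbl.
 */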

struct pipe_resource *
svga_buffer_create(struct pipe_screen *screen,
                   const struct pipe_resource *template)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;

   sbuf = CALLOC_STRUCT(svga_buffer);
   if (!sbuf)
      goto error1;

   sbuf->b.b = *template;
   sbuf->b.vtbl = &svga_buffer_vtbl;
   pipe_reference_init(&sbuf->b.b.reference, 1);
   sbuf->b.b.screen = screen;

   if (svga_buffer_needs_hw_storage(template->bind)) {
      if (svga_buffer_create_host_surface(ss, sbuf) != PIPE_OK)
         goto error2;
   }
   else {
      sbuf->swbuf = align_malloc(template->width0, 64);
      if (!sbuf->swbuf)
         goto error2;
   }

   debug_reference(&sbuf->b.b.reference,
                   (debug_reference_descriptor)debug_describe_resource, 0);

   return &sbuf->b.b;

error2:
   FREE(sbuf);
error1:
   return NULL;
}
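
/*
 * Illustrative sketch (not part of the driver): a state tracker allocating a
 * 64 KB vertex buffer through the screen.  PIPE_BIND_VERTEX_BUFFER makes
 * svga_buffer_needs_hw_storage() return true, so the buffer gets a host
 * surface rather than a malloc'ed swbuf.  The helper name is hypothetical.
 */
#if 0
static struct pipe_resource *
example_create_vbo(struct pipe_screen *screen)
{
   struct pipe_resource templ;

   memset(&templ, 0, sizeof templ);
   templ.target = PIPE_BUFFER;
   templ.format = PIPE_FORMAT_R8_UNORM;
   templ.bind = PIPE_BIND_VERTEX_BUFFER;
   templ.usage = PIPE_USAGE_DEFAULT;
   templ.width0 = 64 * 1024;
   templ.height0 = 1;
   templ.depth0 = 1;
   templ.array_size = 1;

   return screen->resource_create(screen, &templ);
}
#endif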

struct pipe_resource *
svga_user_buffer_create(struct pipe_screen *screen,
                        void *ptr,
                        unsigned bytes,
                        unsigned bind)
{
   struct svga_buffer *sbuf;

   sbuf = CALLOC_STRUCT(svga_buffer);
   if (!sbuf)
      goto no_sbuf;

   pipe_reference_init(&sbuf->b.b.reference, 1);
   sbuf->b.vtbl = &svga_buffer_vtbl;
   sbuf->b.b.screen = screen;
   sbuf->b.b.format = PIPE_FORMAT_R8_UNORM; /* ?? */
   sbuf->b.b.usage = PIPE_USAGE_IMMUTABLE;
   sbuf->b.b.bind = bind;
   sbuf->b.b.width0 = bytes;
   sbuf->b.b.height0 = 1;
   sbuf->b.b.depth0 = 1;
   sbuf->b.b.array_size = 1;

   sbuf->swbuf = ptr;
   sbuf->user = TRUE;

   debug_reference(&sbuf->b.b.reference,
                   (debug_reference_descriptor)debug_describe_resource, 0);

   return &sbuf->b.b;

no_sbuf:
   return NULL;
}
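
/*
 * Illustrative sketch (not part of the driver): wrapping application-owned
 * vertex data without copying.  The pointer lands in sbuf->swbuf and, because
 * sbuf->user is set, svga_buffer_destroy() above never frees it; ownership
 * stays with the application.  The helper name is hypothetical.
 */
#if 0
static struct pipe_resource *
example_wrap_user_vertices(struct pipe_screen *screen,
                           float *xyzw, unsigned num_vertices)
{
   return svga_user_buffer_create(screen, xyzw,
                                  num_vertices * 4 * sizeof(float),
                                  PIPE_BIND_VERTEX_BUFFER);
}
#endif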