}
+/**
+ * Map a range of a buffer.
+ *
+ * Unlike texture DMAs (which are written immediately to the command buffer and
+ * therefore inherently serialized with other context operations), for buffers
+ * we try to coalesce multiple range mappings (i.e., multiple calls to this
+ * function) into a single DMA command, for better efficiency in command
+ * processing. This means we need to exercise extra care here to ensure that
+ * the end result is exactly the same as if one DMA was used for every mapped
+ * range.
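+ *
+ * For example (an illustrative sketch, not actual calling code), two
+ * successive mapped writes to adjacent ranges:
+ *
+ *    svga_buffer_map_range(pipe, buf,  0, 64, PIPE_TRANSFER_WRITE);
+ *    ... write 64 bytes, flush, unmap ...
+ *    svga_buffer_map_range(pipe, buf, 64, 64, PIPE_TRANSFER_WRITE);
+ *    ... write 64 bytes, flush, unmap ...
+ *
+ * should reach the host as a single DMA command covering [0, 128), rather
+ * than as two separate DMA commands.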
+ */
static void *
-svga_buffer_map_range( struct pipe_screen *screen,
+svga_buffer_map_range( struct pipe_context *pipe,
struct pipe_resource *buf,
unsigned offset,
unsigned length,
unsigned usage )
{
- struct svga_screen *ss = svga_screen(screen);
+ struct svga_context *svga = svga_context(pipe);
+ struct svga_screen *ss = svga_screen(pipe->screen);
struct svga_winsys_screen *sws = ss->sws;
struct svga_buffer *sbuf = svga_buffer( buf );
void *map;
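+
+   /*
+    * Adjust the DMA flags (discard, unsynchronized) for the next upload
+    * according to the requested transfer usage, flushing any pending DMA
+    * command where necessary to keep host operations correctly ordered.
+    */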
+ if (usage & PIPE_TRANSFER_WRITE) {
+ if (usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
+ /*
+ * Finish writing any pending DMA commands, and tell the host to discard
+ * the buffer contents on the next DMA operation.
+ */
+
+ if (sbuf->dma.pending) {
+ svga_buffer_upload_flush(svga, sbuf);
+
+ /*
+ * Instead of flushing the context command buffer, simply discard
+ * the current hwbuf, and start a new one.
+ */
+
+ svga_buffer_destroy_hw_storage(ss, sbuf);
+ }
+
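+      /*
+       * The whole buffer contents are being discarded, so any ranges still
+       * awaiting upload can simply be dropped rather than flushed.
+       */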
+ sbuf->map.num_ranges = 0;
+ sbuf->dma.flags.discard = TRUE;
+ }
+
+ if (usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
+ if (!sbuf->map.num_ranges) {
+ /*
+          * No pending ranges to upload so far, so we can tell the host not
+          * to synchronize on the next DMA command.
+ */
+
+ sbuf->dma.flags.unsynchronized = TRUE;
+ }
+ } else {
+ /*
+ * Synchronizing, so finish writing any pending DMA command, and
+ * ensure the next DMA will be done in order.
+ */
+
+ if (sbuf->dma.pending) {
+ svga_buffer_upload_flush(svga, sbuf);
+
+ if (sbuf->hwbuf) {
+ /*
+ * We have a pending DMA upload from a hardware buffer, therefore
+ * we need to ensure that the host finishes processing that DMA
+ * command before the state tracker can start overwriting the
+ * hardware buffer.
+ *
+ * XXX: This could be avoided by tying the hardware buffer to
+ * the transfer (just as done with textures), which would allow
+             * overlapping DMA commands to be queued on the same context
+             * buffer. However, due to the likelihood of software vertex
+             * processing, it is more convenient to hold on to the hardware
+             * buffer, allowing its contents to be quickly accessed from the
+             * CPU without having to do a DMA download from the host.
+ */
+
+ if (usage & PIPE_TRANSFER_DONTBLOCK) {
+ /*
+ * Flushing the command buffer here will most likely cause
+ * the map of the hwbuf below to block, so preemptively
+ * return NULL here if DONTBLOCK is set to prevent unnecessary
+ * command buffer flushes.
+ */
+
+ return NULL;
+ }
+
+ svga_context_flush(svga, NULL);
+ }
+ }
+
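+      /*
+       * This map is synchronized, so the next DMA must not be marked
+       * unsynchronized, whatever any earlier map requested.
+       */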
+ sbuf->dma.flags.unsynchronized = FALSE;
+ }
+ }
+
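+   /*
+    * If the buffer has neither software nor hardware storage, create
+    * hardware storage now (e.g. on first use, or after the discard above).
+    */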
if (!sbuf->swbuf && !sbuf->hwbuf) {
if (svga_buffer_create_hw_storage(ss, sbuf) != PIPE_OK) {
/*
static void
-svga_buffer_flush_mapped_range( struct pipe_screen *screen,
+svga_buffer_flush_mapped_range( struct pipe_context *pipe,
struct pipe_resource *buf,
unsigned offset, unsigned length)
{
struct svga_buffer *sbuf = svga_buffer( buf );
- struct svga_screen *ss = svga_screen(screen);
+ struct svga_screen *ss = svga_screen(pipe->screen);
pipe_mutex_lock(ss->swc_mutex);
assert(sbuf->map.writing);
}
static void
-svga_buffer_unmap( struct pipe_screen *screen,
+svga_buffer_unmap( struct pipe_context *pipe,
struct pipe_resource *buf)
{
- struct svga_screen *ss = svga_screen(screen);
+ struct svga_screen *ss = svga_screen(pipe->screen);
struct svga_winsys_screen *sws = ss->sws;
struct svga_buffer *sbuf = svga_buffer( buf );
svga_buffer_transfer_map( struct pipe_context *pipe,
struct pipe_transfer *transfer )
{
- uint8_t *map = svga_buffer_map_range( pipe->screen,
+ uint8_t *map = svga_buffer_map_range( pipe,
transfer->resource,
transfer->box.x,
transfer->box.width,
{
assert(box->x + box->width <= transfer->box.width);
- svga_buffer_flush_mapped_range(pipe->screen,
+ svga_buffer_flush_mapped_range(pipe,
transfer->resource,
transfer->box.x + box->x,
box->width);
static void svga_buffer_transfer_unmap( struct pipe_context *pipe,
struct pipe_transfer *transfer )
{
- svga_buffer_unmap(pipe->screen,
+ svga_buffer_unmap(pipe,
transfer->resource);
}
* Patch up the upload DMA command reserved by svga_buffer_upload_command
* with the final ranges.
*/
-static void
+void
svga_buffer_upload_flush(struct svga_context *svga,
struct svga_buffer *sbuf)
{
unsigned i;
struct pipe_resource *dummy;
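+   /*
+    * Nothing to patch up if no DMA is pending; returning early makes this
+    * function safe to call unconditionally.
+    */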
+ if (!sbuf->dma.pending) {
+ return;
+ }
+
assert(sbuf->handle);
assert(sbuf->hwbuf);
assert(sbuf->map.num_ranges);
sbuf->head.next = sbuf->head.prev = NULL;
#endif
sbuf->dma.pending = FALSE;
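+   /* The discard and unsynchronized flags applied only to the DMA just
+    * flushed, so reset them now.
+    */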
+ sbuf->dma.flags.discard = FALSE;
+ sbuf->dma.flags.unsynchronized = FALSE;
sbuf->dma.svga = NULL;
sbuf->dma.boxes = NULL;
}
-
/**
* Note a dirty range.
*
/*
* Try to grow one of the ranges.
- *
- * Note that it is not this function task to care about overlapping ranges,
- * as the GMR was already given so it is too late to do anything. Situations
- * where overlapping ranges may pose a problem should be detected via
- * pipe_context::is_resource_referenced and the context that refers to the
- * buffer should be flushed.
*/
for(i = 0; i < sbuf->map.num_ranges; ++i) {
if (dist <= 0) {
/*
* Ranges are contiguous or overlapping -- extend this one and return.
+ *
+ * Note that it is not this function's task to prevent overlapping
+          * ranges, as the GMR was already given to the host, so it is too
+          * late to do anything about them. If the ranges overlap here it
+          * must be because PIPE_TRANSFER_UNSYNCHRONIZED was set.
*/
sbuf->map.ranges[i].start = MIN2(sbuf->map.ranges[i].start, start);
* pending DMA upload and start clean.
*/
- if(sbuf->dma.pending)
- svga_buffer_upload_flush(sbuf->dma.svga, sbuf);
+ svga_buffer_upload_flush(sbuf->dma.svga, sbuf);
assert(!sbuf->dma.pending);
assert(!sbuf->dma.svga);