svga: update buffer code for GBS
src/gallium/drivers/svga/svga_resource_buffer_upload.c
/**********************************************************
 * Copyright 2008-2009 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/


#include "os/os_thread.h"
#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_double_list.h"

#include "svga_cmd.h"
#include "svga_context.h"
#include "svga_debug.h"
#include "svga_resource_buffer.h"
#include "svga_resource_buffer_upload.h"
#include "svga_screen.h"
#include "svga_winsys.h"

/**
 * Describes a complete SVGA_3D_CMD_UPDATE_GB_IMAGE command
 */
struct svga_3d_update_gb_image {
   SVGA3dCmdHeader header;
   SVGA3dCmdUpdateGBImage body;
};

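/**
 * Describes a complete SVGA_3D_CMD_INVALIDATE_GB_IMAGE command
 */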
struct svga_3d_invalidate_gb_image {
   SVGA3dCmdHeader header;
   SVGA3dCmdInvalidateGBImage body;
};


/**
 * Allocate a winsys_buffer (i.e. DMA, aka GMR memory).
 *
 * It will flush and retry in case the first attempt to create a DMA buffer
 * fails, so it should not be called from any function involved in flushing
 * to avoid recursion.
 */
struct svga_winsys_buffer *
svga_winsys_buffer_create( struct svga_context *svga,
                           unsigned alignment,
                           unsigned usage,
                           unsigned size )
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_winsys_buffer *buf;

   /* Just try */
   buf = sws->buffer_create(sws, alignment, usage, size);
   if (!buf) {
      SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "flushing context to find %d bytes GMR\n",
               size);

      /* Try flushing all pending DMAs */
      svga_context_flush(svga, NULL);
      buf = sws->buffer_create(sws, alignment, usage, size);
   }

   return buf;
}


/**
 * Destroy HW storage if separate from the host surface.
 * In the GB case, the HW storage is associated with the host surface,
 * so this function is a no-op there.
 */
void
svga_buffer_destroy_hw_storage(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;

   assert(!sbuf->map.count);
   assert(sbuf->hwbuf);
   if (sbuf->hwbuf) {
      sws->buffer_destroy(sws, sbuf->hwbuf);
      sbuf->hwbuf = NULL;
   }
}


/**
 * Allocate DMA-able or updatable storage for the buffer.
 *
 * Called before mapping a buffer.
 */
enum pipe_error
svga_buffer_create_hw_storage(struct svga_screen *ss,
                              struct svga_buffer *sbuf)
{
   assert(!sbuf->user);

   if (ss->sws->have_gb_objects) {
      assert(sbuf->handle || !sbuf->dma.pending);
      return svga_buffer_create_host_surface(ss, sbuf);
   }
   if (!sbuf->hwbuf) {
      struct svga_winsys_screen *sws = ss->sws;
      unsigned alignment = 16;
      unsigned usage = 0;
      unsigned size = sbuf->b.b.width0;

      sbuf->hwbuf = sws->buffer_create(sws, alignment, usage, size);
      if (!sbuf->hwbuf)
         return PIPE_ERROR_OUT_OF_MEMORY;

      assert(!sbuf->dma.pending);
   }

   return PIPE_OK;
}


enum pipe_error
svga_buffer_create_host_surface(struct svga_screen *ss,
                                struct svga_buffer *sbuf)
{
   assert(!sbuf->user);

   if (!sbuf->handle) {
      sbuf->key.flags = 0;

      sbuf->key.format = SVGA3D_BUFFER;
      if (sbuf->b.b.bind & PIPE_BIND_VERTEX_BUFFER)
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_VERTEXBUFFER;
      if (sbuf->b.b.bind & PIPE_BIND_INDEX_BUFFER)
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_INDEXBUFFER;

      sbuf->key.size.width = sbuf->b.b.width0;
      sbuf->key.size.height = 1;
      sbuf->key.size.depth = 1;

      sbuf->key.numFaces = 1;
      sbuf->key.numMipLevels = 1;
      sbuf->key.cachable = 1;

      SVGA_DBG(DEBUG_DMA, "surface_create for buffer sz %d\n", sbuf->b.b.width0);

      sbuf->handle = svga_screen_surface_create(ss, &sbuf->key);
      if (!sbuf->handle)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* Always set the discard flag on the first time the buffer is written
       * as svga_screen_surface_create might have passed a recycled host
       * buffer.
       */
      sbuf->dma.flags.discard = TRUE;

      SVGA_DBG(DEBUG_DMA, " --> got sid %p sz %d (buffer)\n", sbuf->handle, sbuf->b.b.width0);
   }

   return PIPE_OK;
}


void
svga_buffer_destroy_host_surface(struct svga_screen *ss,
                                 struct svga_buffer *sbuf)
{
   if (sbuf->handle) {
      SVGA_DBG(DEBUG_DMA, " ungrab sid %p sz %d\n", sbuf->handle, sbuf->b.b.width0);
      svga_screen_surface_destroy(ss, &sbuf->key, &sbuf->handle);
   }
}


/**
 * Insert a number of preliminary UPDATE_GB_IMAGE commands in the
 * command buffer, equal to the current number of mapped ranges.
 * The UPDATE_GB_IMAGE commands will be patched with the
 * actual ranges just before flush.
 */
static enum pipe_error
svga_buffer_upload_gb_command(struct svga_context *svga,
                              struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   SVGA3dCmdUpdateGBImage *cmd;
   struct svga_3d_update_gb_image *ccmd = NULL;
   uint32 numBoxes = sbuf->map.num_ranges;
   struct pipe_resource *dummy;
   unsigned int i;

   assert(numBoxes);
   assert(sbuf->dma.updates == NULL);

   if (sbuf->dma.flags.discard) {
      struct svga_3d_invalidate_gb_image *cicmd = NULL;
      SVGA3dCmdInvalidateGBImage *icmd;

      /* Allocate FIFO space for one INVALIDATE_GB_IMAGE command followed by
       * 'numBoxes' UPDATE_GB_IMAGE commands.  Allocate all at once rather
       * than with separate commands because we need to properly deal with
       * filling the command buffer.
       */
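      /* Illustrative layout of this single reservation:
       *
       *   SVGA3dCmdHeader             (INVALIDATE_GB_IMAGE)
       *   SVGA3dCmdInvalidateGBImage
       *   numBoxes x { SVGA3dCmdHeader (UPDATE_GB_IMAGE)
       *                SVGA3dCmdUpdateGBImage }
       *
       * SVGA3D_FIFOReserve() returns a pointer to the first command
       * body (icmd); the enclosing header is reached via container_of().
       */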
      icmd = SVGA3D_FIFOReserve(swc,
                                SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
                                sizeof *icmd + numBoxes * sizeof *ccmd,
                                2);
      if (!icmd)
         return PIPE_ERROR_OUT_OF_MEMORY;

      cicmd = container_of(icmd, cicmd, body);
      cicmd->header.size = sizeof *icmd;
      swc->surface_relocation(swc, &icmd->image.sid, NULL, sbuf->handle,
                              (SVGA_RELOC_WRITE |
                               SVGA_RELOC_INTERNAL |
                               SVGA_RELOC_DMA));
      icmd->image.face = 0;
      icmd->image.mipmap = 0;

      /* initialize the first UPDATE_GB_IMAGE command */
      ccmd = (struct svga_3d_update_gb_image *) &icmd[1];
      ccmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
      cmd = &ccmd->body;

   } else {
      /* Allocate FIFO space for 'numBoxes' UPDATE_GB_IMAGE commands */
      cmd = SVGA3D_FIFOReserve(swc,
                               SVGA_3D_CMD_UPDATE_GB_IMAGE,
                               sizeof *cmd + (numBoxes - 1) * sizeof *ccmd,
                               1);
      if (!cmd)
         return PIPE_ERROR_OUT_OF_MEMORY;

      ccmd = container_of(cmd, ccmd, body);
   }

   /* Init the first UPDATE_GB_IMAGE command */
   ccmd->header.size = sizeof *cmd;
   swc->surface_relocation(swc, &cmd->image.sid, NULL, sbuf->handle,
                           SVGA_RELOC_WRITE | SVGA_RELOC_INTERNAL);
   cmd->image.face = 0;
   cmd->image.mipmap = 0;

   /* Save pointer to the first UPDATE_GB_IMAGE command so that we can
    * fill in the box info below.
    */
   sbuf->dma.updates = ccmd;

   /*
    * Copy the relocation info, face and mipmap to all
    * subsequent commands.  NOTE: For winsyses that actually
    * patch the image.sid member at flush time, this will fail
    * miserably.  For those we need to add as many relocations
    * as there are copy boxes.
    */

   for (i = 1; i < numBoxes; ++i) {
      memcpy(++ccmd, sbuf->dma.updates, sizeof *ccmd);
   }

   /* Increment reference count */
   sbuf->dma.svga = svga;
   dummy = NULL;
   pipe_resource_reference(&dummy, &sbuf->b.b);
   SVGA_FIFOCommitAll(swc);

   sbuf->dma.flags.discard = FALSE;

   return PIPE_OK;
}


/**
 * Variant of SVGA3D_BufferDMA which leaves the copy boxes temporarily
 * blank, to be filled in later by svga_buffer_upload_flush().
 */
static enum pipe_error
svga_buffer_upload_command(struct svga_context *svga,
                           struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   struct svga_winsys_buffer *guest = sbuf->hwbuf;
   struct svga_winsys_surface *host = sbuf->handle;
   SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
   SVGA3dCmdSurfaceDMA *cmd;
   uint32 numBoxes = sbuf->map.num_ranges;
   SVGA3dCopyBox *boxes;
   SVGA3dCmdSurfaceDMASuffix *pSuffix;
   unsigned region_flags;
   unsigned surface_flags;
   struct pipe_resource *dummy;

   if (svga_have_gb_objects(svga))
      return svga_buffer_upload_gb_command(svga, sbuf);

   if (transfer == SVGA3D_WRITE_HOST_VRAM) {
      region_flags = SVGA_RELOC_READ;
      surface_flags = SVGA_RELOC_WRITE;
   }
   else if (transfer == SVGA3D_READ_HOST_VRAM) {
      region_flags = SVGA_RELOC_WRITE;
      surface_flags = SVGA_RELOC_READ;
   }
   else {
      assert(0);
      return PIPE_ERROR_BAD_INPUT;
   }

   assert(numBoxes);

   cmd = SVGA3D_FIFOReserve(swc,
                            SVGA_3D_CMD_SURFACE_DMA,
                            sizeof *cmd + numBoxes * sizeof *boxes + sizeof *pSuffix,
                            2);
   if (!cmd)
      return PIPE_ERROR_OUT_OF_MEMORY;

   swc->region_relocation(swc, &cmd->guest.ptr, guest, 0, region_flags);
   cmd->guest.pitch = 0;

   swc->surface_relocation(swc, &cmd->host.sid, NULL, host, surface_flags);
   cmd->host.face = 0;
   cmd->host.mipmap = 0;

   cmd->transfer = transfer;

   sbuf->dma.boxes = (SVGA3dCopyBox *)&cmd[1];
   sbuf->dma.svga = svga;

   /* Increment reference count */
   dummy = NULL;
   pipe_resource_reference(&dummy, &sbuf->b.b);

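   /* The reservation is laid out as the SVGA3dCmdSurfaceDMA body,
    * followed by numBoxes copy boxes (left blank here and patched in
    * by svga_buffer_upload_flush()), followed by the suffix.
    */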
   pSuffix = (SVGA3dCmdSurfaceDMASuffix *)((uint8_t*)cmd + sizeof *cmd + numBoxes * sizeof *boxes);
   pSuffix->suffixSize = sizeof *pSuffix;
   pSuffix->maximumOffset = sbuf->b.b.width0;
   pSuffix->flags = sbuf->dma.flags;

   SVGA_FIFOCommitAll(swc);

   sbuf->dma.flags.discard = FALSE;

   return PIPE_OK;
}


/**
 * Patch up the upload DMA command reserved by svga_buffer_upload_command
 * with the final ranges.
 */
void
svga_buffer_upload_flush(struct svga_context *svga,
                         struct svga_buffer *sbuf)
{
   unsigned i;
   struct pipe_resource *dummy;

   if (!sbuf->dma.pending) {
      //debug_printf("no dma pending on buffer\n");
      return;
   }

   assert(sbuf->handle);
   assert(sbuf->map.num_ranges);
   assert(sbuf->dma.svga == svga);

   /*
    * Patch the DMA/update command with the final copy box.
    */
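   /* A buffer is effectively a 1-D image: only the x extent of each
    * box varies, while y/z stay 0 and height/depth stay 1.
    */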
   if (svga_have_gb_objects(svga)) {
      struct svga_3d_update_gb_image *update = sbuf->dma.updates;
      assert(update);

      for (i = 0; i < sbuf->map.num_ranges; ++i, ++update) {
         SVGA3dBox *box = &update->body.box;

         SVGA_DBG(DEBUG_DMA, " bytes %u - %u\n",
                  sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

         box->x = sbuf->map.ranges[i].start;
         box->y = 0;
         box->z = 0;
         box->w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
         box->h = 1;
         box->d = 1;

         assert(box->x <= sbuf->b.b.width0);
         assert(box->x + box->w <= sbuf->b.b.width0);
      }
   }
   else {
      assert(sbuf->hwbuf);
      assert(sbuf->dma.boxes);
      SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

      for (i = 0; i < sbuf->map.num_ranges; ++i) {
         SVGA3dCopyBox *box = sbuf->dma.boxes + i;

         SVGA_DBG(DEBUG_DMA, " bytes %u - %u\n",
                  sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

         box->x = sbuf->map.ranges[i].start;
         box->y = 0;
         box->z = 0;
         box->w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
         box->h = 1;
         box->d = 1;
         box->srcx = sbuf->map.ranges[i].start;
         box->srcy = 0;
         box->srcz = 0;

         assert(box->x <= sbuf->b.b.width0);
         assert(box->x + box->w <= sbuf->b.b.width0);
      }
   }

   /* Reset sbuf for next use/upload */

   sbuf->map.num_ranges = 0;

   assert(sbuf->head.prev && sbuf->head.next);
   LIST_DEL(&sbuf->head); /* remove from svga->dirty_buffers list */
#ifdef DEBUG
   sbuf->head.next = sbuf->head.prev = NULL;
#endif
   sbuf->dma.pending = FALSE;
   sbuf->dma.flags.discard = FALSE;
   sbuf->dma.flags.unsynchronized = FALSE;

   sbuf->dma.svga = NULL;
   sbuf->dma.boxes = NULL;
   sbuf->dma.updates = NULL;

   /* Decrement reference count (and potentially destroy) */
   dummy = &sbuf->b.b;
   pipe_resource_reference(&dummy, NULL);
}


/**
 * Note a dirty range.
 *
 * This function only notes the range down. It doesn't actually emit a DMA
 * upload command. That only happens when a context tries to refer to this
 * buffer, and the DMA upload command is added to that context's command
 * buffer.
 *
 * We try to lump as many contiguous DMA transfers together as possible.
 */
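/* Example (illustrative): with one existing range [0, 16), calling
 * svga_buffer_add_range(sbuf, 16, 32) finds the ranges contiguous
 * (dist == 0) and simply grows the existing range to [0, 32), whereas
 * svga_buffer_add_range(sbuf, 64, 96) records a new, separate range
 * (or, once SVGA_BUFFER_MAX_RANGES is reached, grows the nearest one).
 */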
void
svga_buffer_add_range(struct svga_buffer *sbuf,
                      unsigned start,
                      unsigned end)
{
   unsigned i;
   unsigned nearest_range;
   unsigned nearest_dist;

   assert(end > start);

   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      nearest_range = sbuf->map.num_ranges;
      nearest_dist = ~0;
   } else {
      nearest_range = SVGA_BUFFER_MAX_RANGES - 1;
      nearest_dist = 0;
   }

   /*
    * Try to grow one of the ranges.
    */

   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      int left_dist;
      int right_dist;
      int dist;

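      /* dist measures the gap between the new range and range i: it is
       * <= 0 when the two touch or overlap, and otherwise gives the
       * size of the gap in bytes.
       */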
      left_dist = start - sbuf->map.ranges[i].end;
      right_dist = sbuf->map.ranges[i].start - end;
      dist = MAX2(left_dist, right_dist);

      if (dist <= 0) {
         /*
          * Ranges are contiguous or overlapping -- extend this one and return.
          *
          * Note that it is not this function's task to prevent overlapping
          * ranges, as the GMR was already given out, so it is too late to do
          * anything.  If the ranges overlap here it must surely be because
          * PIPE_TRANSFER_UNSYNCHRONIZED was set.
          */

         sbuf->map.ranges[i].start = MIN2(sbuf->map.ranges[i].start, start);
         sbuf->map.ranges[i].end = MAX2(sbuf->map.ranges[i].end, end);
         return;
      }
      else {
         /*
          * Discontiguous ranges -- keep track of the nearest range.
          */

         if (dist < nearest_dist) {
            nearest_range = i;
            nearest_dist = dist;
         }
      }
   }

   /*
    * We cannot add a new range to an existing DMA command, so patch-up the
    * pending DMA upload and start clean.
    */

   svga_buffer_upload_flush(sbuf->dma.svga, sbuf);

   assert(!sbuf->dma.pending);
   assert(!sbuf->dma.svga);
   assert(!sbuf->dma.boxes);

   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      /*
       * Add a new range.
       */

      sbuf->map.ranges[sbuf->map.num_ranges].start = start;
      sbuf->map.ranges[sbuf->map.num_ranges].end = end;
      ++sbuf->map.num_ranges;
   } else {
      /*
       * Everything else failed, so just extend the nearest range.
       *
       * It is OK to do this because we always keep a local copy of the
       * host buffer data, for SW TNL, and the host never modifies the buffer.
       */

      assert(nearest_range < SVGA_BUFFER_MAX_RANGES);
      assert(nearest_range < sbuf->map.num_ranges);
      sbuf->map.ranges[nearest_range].start = MIN2(sbuf->map.ranges[nearest_range].start, start);
      sbuf->map.ranges[nearest_range].end = MAX2(sbuf->map.ranges[nearest_range].end, end);
   }
}


/**
 * Copy the contents of the malloc buffer to a hardware buffer.
 */
static enum pipe_error
svga_buffer_update_hw(struct svga_context *svga, struct svga_buffer *sbuf)
{
   assert(!sbuf->user);
   if (!svga_buffer_has_hw_storage(sbuf)) {
      struct svga_screen *ss = svga_screen(sbuf->b.b.screen);
      enum pipe_error ret;
      boolean retry;
      void *map;

      assert(sbuf->swbuf);
      if (!sbuf->swbuf)
         return PIPE_ERROR;

      ret = svga_buffer_create_hw_storage(svga_screen(sbuf->b.b.screen),
                                          sbuf);
      if (ret != PIPE_OK)
         return ret;

      pipe_mutex_lock(ss->swc_mutex);
      map = svga_buffer_hw_storage_map(svga, sbuf, PIPE_TRANSFER_WRITE, &retry);
      assert(map);
      assert(!retry);
      if (!map) {
         pipe_mutex_unlock(ss->swc_mutex);
         svga_buffer_destroy_hw_storage(ss, sbuf);
         return PIPE_ERROR;
      }

      memcpy(map, sbuf->swbuf, sbuf->b.b.width0);
      svga_buffer_hw_storage_unmap(svga, sbuf);

      /* This user/malloc buffer is now indistinguishable from a gpu buffer */
      assert(!sbuf->map.count);
      if (!sbuf->map.count) {
         if (sbuf->user)
            sbuf->user = FALSE;
         else
            align_free(sbuf->swbuf);
         sbuf->swbuf = NULL;
      }

      pipe_mutex_unlock(ss->swc_mutex);
   }

   return PIPE_OK;
}


/**
 * Upload the buffer to the host in a piecewise fashion.
 *
 * Used when the buffer is too big to fit in the GMR aperture.
 * This function should never get called in the guest-backed case
 * since we always have a full-sized hardware storage backing the
 * host surface.
 */
static enum pipe_error
svga_buffer_upload_piecewise(struct svga_screen *ss,
                             struct svga_context *svga,
                             struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;
   const unsigned alignment = sizeof(void *);
   const unsigned usage = 0;
   unsigned i;

   assert(sbuf->map.num_ranges);
   assert(!sbuf->dma.pending);
   assert(!svga_have_gb_objects(svga));

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      struct svga_buffer_range *range = &sbuf->map.ranges[i];
      unsigned offset = range->start;
      unsigned size = range->end - range->start;

      while (offset < range->end) {
         struct svga_winsys_buffer *hwbuf;
         uint8_t *map;
         enum pipe_error ret;

         if (offset + size > range->end)
            size = range->end - offset;

         hwbuf = sws->buffer_create(sws, alignment, usage, size);
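         /* If the allocation failed, halve the piece size until a
          * buffer small enough to fit in the GMR aperture can be
          * created.
          */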
         while (!hwbuf) {
            size /= 2;
            if (!size)
               return PIPE_ERROR_OUT_OF_MEMORY;
            hwbuf = sws->buffer_create(sws, alignment, usage, size);
         }

         SVGA_DBG(DEBUG_DMA, " bytes %u - %u\n",
                  offset, offset + size);

         map = sws->buffer_map(sws, hwbuf,
                               PIPE_TRANSFER_WRITE |
                               PIPE_TRANSFER_DISCARD_RANGE);
         assert(map);
         if (map) {
            memcpy(map, (const char *) sbuf->swbuf + offset, size);
            sws->buffer_unmap(sws, hwbuf);
         }

         ret = SVGA3D_BufferDMA(svga->swc,
                                hwbuf, sbuf->handle,
                                SVGA3D_WRITE_HOST_VRAM,
                                size, 0, offset, sbuf->dma.flags);
         if (ret != PIPE_OK) {
            svga_context_flush(svga, NULL);
            ret = SVGA3D_BufferDMA(svga->swc,
                                   hwbuf, sbuf->handle,
                                   SVGA3D_WRITE_HOST_VRAM,
                                   size, 0, offset, sbuf->dma.flags);
            assert(ret == PIPE_OK);
         }

         sbuf->dma.flags.discard = FALSE;

         sws->buffer_destroy(sws, hwbuf);

         offset += size;
      }
   }

   sbuf->map.num_ranges = 0;

   return PIPE_OK;
}


/**
 * Get (or create/upload) the winsys surface handle so that we can
 * refer to this buffer in fifo commands.
 * This function will create the host surface, and in the GB case also the
 * hardware storage. In the non-GB case, the hardware storage will be created
 * if there are mapped ranges and the data is currently in a malloc'ed buffer.
 */
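/* Typical use (sketch): command-emission code calls svga_buffer_handle()
 * to obtain the surface id for a buffer it is about to reference; the
 * first call after the buffer was dirtied queues the DMA/update command,
 * and svga_buffer_upload_flush() later patches it with the final ranges.
 */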
struct svga_winsys_surface *
svga_buffer_handle(struct svga_context *svga,
                   struct pipe_resource *buf)
{
   struct pipe_screen *screen = svga->pipe.screen;
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;
   enum pipe_error ret;

   if (!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   assert(!sbuf->user);

   if (!sbuf->handle) {
      /* This call will set sbuf->handle */
      if (svga_have_gb_objects(svga)) {
         ret = svga_buffer_update_hw(svga, sbuf);
      } else {
         ret = svga_buffer_create_host_surface(ss, sbuf);
      }
      if (ret != PIPE_OK)
         return NULL;
   }

   assert(sbuf->handle);

   if (sbuf->map.num_ranges) {
      if (!sbuf->dma.pending) {
         /*
          * No pending DMA upload yet, so insert a DMA upload command now.
          */

         /*
          * Migrate the data from swbuf -> hwbuf if necessary.
          */
         ret = svga_buffer_update_hw(svga, sbuf);
         if (ret == PIPE_OK) {
            /*
             * Queue a dma command.
             */

            ret = svga_buffer_upload_command(svga, sbuf);
            if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
               svga_context_flush(svga, NULL);
               ret = svga_buffer_upload_command(svga, sbuf);
               assert(ret == PIPE_OK);
            }
            if (ret == PIPE_OK) {
               sbuf->dma.pending = TRUE;
               assert(!sbuf->head.prev && !sbuf->head.next);
               LIST_ADDTAIL(&sbuf->head, &svga->dirty_buffers);
            }
         }
         else if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
            /*
             * The buffer is too big to fit in the GMR aperture, so break it
             * into smaller pieces.
             */
            ret = svga_buffer_upload_piecewise(ss, svga, sbuf);
         }

         if (ret != PIPE_OK) {
            /*
             * Something unexpected happened above. There is very little that
             * we can do other than proceeding while ignoring the dirty ranges.
             */
            assert(0);
            sbuf->map.num_ranges = 0;
         }
      }
      else {
         /*
          * There is already a pending DMA. Make sure it is from this context.
          */
         assert(sbuf->dma.svga == svga);
      }
   }

   assert(!sbuf->map.num_ranges || sbuf->dma.pending);

   return sbuf->handle;
}


void
svga_context_flush_buffers(struct svga_context *svga)
{
   struct list_head *curr, *next;
   struct svga_buffer *sbuf;

   curr = svga->dirty_buffers.next;
   next = curr->next;
   while (curr != &svga->dirty_buffers) {
      sbuf = LIST_ENTRY(struct svga_buffer, curr, head);

      assert(p_atomic_read(&sbuf->b.b.reference.count) != 0);
      assert(sbuf->dma.pending);

      svga_buffer_upload_flush(svga, sbuf);

      curr = next;
      next = curr->next;
   }
}