svga: Ignore redefine_user_buffer calls
src/gallium/drivers/svga/svga_resource_buffer_upload.c
/**********************************************************
 * Copyright 2008-2009 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "svga_cmd.h"

#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "os/os_thread.h"
#include "util/u_math.h"
#include "util/u_memory.h"

#include "svga_context.h"
#include "svga_screen.h"
#include "svga_resource_buffer.h"
#include "svga_resource_buffer_upload.h"
#include "svga_winsys.h"
#include "svga_debug.h"

/**
 * Allocate a winsys_buffer (i.e. DMA, aka GMR memory).
 *
 * It will flush and retry in case the first attempt to create a DMA buffer
 * fails, so it should not be called from any function involved in flushing,
 * to avoid recursion.
 */
struct svga_winsys_buffer *
svga_winsys_buffer_create( struct svga_context *svga,
                           unsigned alignment,
                           unsigned usage,
                           unsigned size )
{
   struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
   struct svga_winsys_screen *sws = svgascreen->sws;
   struct svga_winsys_buffer *buf;

   /* Just try */
   buf = sws->buffer_create(sws, alignment, usage, size);
   if (!buf) {
      SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "flushing context to find %d bytes GMR\n",
               size);

      /* Try flushing all pending DMAs */
      svga_context_flush(svga, NULL);
      buf = sws->buffer_create(sws, alignment, usage, size);
   }

   return buf;
}
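
/*
 * A minimal usage sketch (hypothetical caller; the alignment and usage
 * arguments shown are merely illustrative):
 *
 *    struct svga_winsys_buffer *hwbuf =
 *       svga_winsys_buffer_create(svga, 16, 0, size);
 *    if (!hwbuf)
 *       return PIPE_ERROR_OUT_OF_MEMORY;   (both attempts failed)
 */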
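/**
 * Release the buffer's DMA'ble (GMR) storage, if any.
 *
 * The buffer must not be mapped when this is called.
 */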
void
svga_buffer_destroy_hw_storage(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;

   assert(!sbuf->map.count);
   assert(sbuf->hwbuf);
   if(sbuf->hwbuf) {
      sws->buffer_destroy(sws, sbuf->hwbuf);
      sbuf->hwbuf = NULL;
   }
}

/**
 * Allocate DMA'ble storage for the buffer.
 *
 * Called before mapping a buffer.
 */
enum pipe_error
svga_buffer_create_hw_storage(struct svga_screen *ss,
                              struct svga_buffer *sbuf)
{
   assert(!sbuf->user);

   if(!sbuf->hwbuf) {
      struct svga_winsys_screen *sws = ss->sws;
      unsigned alignment = 16;
      unsigned usage = 0;
      unsigned size = sbuf->b.b.width0;

      sbuf->hwbuf = sws->buffer_create(sws, alignment, usage, size);
      if(!sbuf->hwbuf)
         return PIPE_ERROR_OUT_OF_MEMORY;

      assert(!sbuf->dma.pending);
   }

   return PIPE_OK;
}

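/**
 * Allocate a host surface (sid) for this buffer, if it doesn't have
 * one already.
 *
 * The surface key is derived from the buffer's size and bind flags so
 * that a compatible surface can be recycled from the screen's surface
 * cache (note key.cachable = 1 below).
 */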
enum pipe_error
svga_buffer_create_host_surface(struct svga_screen *ss,
                                struct svga_buffer *sbuf)
{
   assert(!sbuf->user);

   if(!sbuf->handle) {
      sbuf->key.flags = 0;

      sbuf->key.format = SVGA3D_BUFFER;
      if(sbuf->b.b.bind & PIPE_BIND_VERTEX_BUFFER)
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_VERTEXBUFFER;
      if(sbuf->b.b.bind & PIPE_BIND_INDEX_BUFFER)
         sbuf->key.flags |= SVGA3D_SURFACE_HINT_INDEXBUFFER;

      sbuf->key.size.width = sbuf->b.b.width0;
      sbuf->key.size.height = 1;
      sbuf->key.size.depth = 1;

      sbuf->key.numFaces = 1;
      sbuf->key.numMipLevels = 1;
      sbuf->key.cachable = 1;

      SVGA_DBG(DEBUG_DMA, "surface_create for buffer sz %d\n", sbuf->b.b.width0);

      sbuf->handle = svga_screen_surface_create(ss, &sbuf->key);
      if(!sbuf->handle)
         return PIPE_ERROR_OUT_OF_MEMORY;

      /* Always set the discard flag on the first time the buffer is written
       * as svga_screen_surface_create might have passed a recycled host
       * buffer.
       */
      sbuf->dma.flags.discard = TRUE;

      SVGA_DBG(DEBUG_DMA, "   --> got sid %p sz %d (buffer)\n", sbuf->handle, sbuf->b.b.width0);
   }

   return PIPE_OK;
}

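/**
 * Release the host surface, if any.  Since the key is passed along,
 * the surface may be returned to the screen's surface cache rather
 * than destroyed outright.
 */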
void
svga_buffer_destroy_host_surface(struct svga_screen *ss,
                                 struct svga_buffer *sbuf)
{
   if(sbuf->handle) {
      SVGA_DBG(DEBUG_DMA, " ungrab sid %p sz %d\n", sbuf->handle, sbuf->b.b.width0);
      svga_screen_surface_destroy(ss, &sbuf->key, &sbuf->handle);
   }
}

/**
 * Variant of SVGA3D_BufferDMA which leaves the copy boxes temporarily
 * blank, to be filled in later by svga_buffer_upload_flush().
 */
static enum pipe_error
svga_buffer_upload_command(struct svga_context *svga,
                           struct svga_buffer *sbuf)
{
   struct svga_winsys_context *swc = svga->swc;
   struct svga_winsys_buffer *guest = sbuf->hwbuf;
   struct svga_winsys_surface *host = sbuf->handle;
   SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
   SVGA3dCmdSurfaceDMA *cmd;
   uint32 numBoxes = sbuf->map.num_ranges;
   SVGA3dCopyBox *boxes;
   SVGA3dCmdSurfaceDMASuffix *pSuffix;
   unsigned region_flags;
   unsigned surface_flags;
   struct pipe_resource *dummy;

   if(transfer == SVGA3D_WRITE_HOST_VRAM) {
      region_flags = SVGA_RELOC_READ;
      surface_flags = SVGA_RELOC_WRITE;
   }
   else if(transfer == SVGA3D_READ_HOST_VRAM) {
      region_flags = SVGA_RELOC_WRITE;
      surface_flags = SVGA_RELOC_READ;
   }
   else {
      assert(0);
      return PIPE_ERROR_BAD_INPUT;
   }

   assert(numBoxes);

   cmd = SVGA3D_FIFOReserve(swc,
                            SVGA_3D_CMD_SURFACE_DMA,
                            sizeof *cmd + numBoxes * sizeof *boxes + sizeof *pSuffix,
                            2);
   if(!cmd)
      return PIPE_ERROR_OUT_OF_MEMORY;

   swc->region_relocation(swc, &cmd->guest.ptr, guest, 0, region_flags);
   cmd->guest.pitch = 0;

   swc->surface_relocation(swc, &cmd->host.sid, host, surface_flags);
   cmd->host.face = 0;
   cmd->host.mipmap = 0;

   cmd->transfer = transfer;

   sbuf->dma.boxes = (SVGA3dCopyBox *)&cmd[1];
   sbuf->dma.svga = svga;

   /* Increment the buffer's reference count: it must stay alive until
    * this command has been patched and flushed.
    */
   dummy = NULL;
   pipe_resource_reference(&dummy, &sbuf->b.b);

   pSuffix = (SVGA3dCmdSurfaceDMASuffix *)((uint8_t*)cmd + sizeof *cmd + numBoxes * sizeof *boxes);
   pSuffix->suffixSize = sizeof *pSuffix;
   pSuffix->maximumOffset = sbuf->b.b.width0;
   pSuffix->flags = sbuf->dma.flags;

   SVGA_FIFOCommitAll(swc);

   sbuf->dma.flags.discard = FALSE;

   return PIPE_OK;
}
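
/*
 * The command reserved above is laid out as (a sketch):
 *
 *    SVGA3dCmdSurfaceDMA | SVGA3dCopyBox x numBoxes | SVGA3dCmdSurfaceDMASuffix
 *
 * sbuf->dma.boxes points at the copy box array, which remains blank until
 * svga_buffer_upload_flush() writes the final ranges into it.
 */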
/**
 * Patch up the upload DMA command reserved by svga_buffer_upload_command
 * with the final ranges.
 */
void
svga_buffer_upload_flush(struct svga_context *svga,
                         struct svga_buffer *sbuf)
{
   SVGA3dCopyBox *boxes;
   unsigned i;
   struct pipe_resource *dummy;

   if (!sbuf->dma.pending) {
      return;
   }

   assert(sbuf->handle);
   assert(sbuf->hwbuf);
   assert(sbuf->map.num_ranges);
   assert(sbuf->dma.svga == svga);
   assert(sbuf->dma.boxes);

   /*
    * Patch the DMA command with the final copy boxes.
    */

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   boxes = sbuf->dma.boxes;
   for(i = 0; i < sbuf->map.num_ranges; ++i) {
      SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
               sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);

      boxes[i].x = sbuf->map.ranges[i].start;
      boxes[i].y = 0;
      boxes[i].z = 0;
      boxes[i].w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
      boxes[i].h = 1;
      boxes[i].d = 1;
      boxes[i].srcx = sbuf->map.ranges[i].start;
      boxes[i].srcy = 0;
      boxes[i].srcz = 0;
   }

   sbuf->map.num_ranges = 0;

   assert(sbuf->head.prev && sbuf->head.next);
   LIST_DEL(&sbuf->head);
#ifdef DEBUG
   sbuf->head.next = sbuf->head.prev = NULL;
#endif
   sbuf->dma.pending = FALSE;
   sbuf->dma.flags.discard = FALSE;
   sbuf->dma.flags.unsynchronized = FALSE;

   sbuf->dma.svga = NULL;
   sbuf->dma.boxes = NULL;

   /* Drop the reference taken in svga_buffer_upload_command (and
    * potentially destroy the buffer).
    */
   dummy = &sbuf->b.b;
   pipe_resource_reference(&dummy, NULL);
}

/**
 * Note a dirty range.
 *
 * This function only notes the range down. It doesn't actually emit a DMA
 * upload command. That only happens when a context tries to refer to this
 * buffer, and the DMA upload command is added to that context's command
 * buffer.
 *
 * We try to lump as many contiguous DMA transfers together as possible.
 */
void
svga_buffer_add_range(struct svga_buffer *sbuf,
                      unsigned start,
                      unsigned end)
{
   unsigned i;
   unsigned nearest_range;
   unsigned nearest_dist;

   assert(end > start);

   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      nearest_range = sbuf->map.num_ranges;
      nearest_dist = ~0;
   } else {
      nearest_range = SVGA_BUFFER_MAX_RANGES - 1;
      nearest_dist = 0;
   }

   /*
    * Try to grow one of the ranges.
    */

   for(i = 0; i < sbuf->map.num_ranges; ++i) {
      int left_dist;
      int right_dist;
      int dist;

      left_dist = start - sbuf->map.ranges[i].end;
      right_dist = sbuf->map.ranges[i].start - end;
      dist = MAX2(left_dist, right_dist);

      if (dist <= 0) {
         /*
          * Ranges are contiguous or overlapping -- extend this one and return.
          *
          * Note that it is not this function's task to prevent overlapping
          * ranges, as the GMR was already given so it is too late to do
          * anything. If the ranges overlap here it must surely be because
          * PIPE_TRANSFER_UNSYNCHRONIZED was set.
          */

         sbuf->map.ranges[i].start = MIN2(sbuf->map.ranges[i].start, start);
         sbuf->map.ranges[i].end = MAX2(sbuf->map.ranges[i].end, end);
         return;
      }
      else {
         /*
          * Discontiguous ranges -- keep track of the nearest range.
          */

         if (dist < nearest_dist) {
            nearest_range = i;
            nearest_dist = dist;
         }
      }
   }

   /*
    * We cannot add a new range to an existing DMA command, so patch up the
    * pending DMA upload and start clean.
    */

   svga_buffer_upload_flush(sbuf->dma.svga, sbuf);

   assert(!sbuf->dma.pending);
   assert(!sbuf->dma.svga);
   assert(!sbuf->dma.boxes);

   if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
      /*
       * Add a new range.
       */

      sbuf->map.ranges[sbuf->map.num_ranges].start = start;
      sbuf->map.ranges[sbuf->map.num_ranges].end = end;
      ++sbuf->map.num_ranges;
   } else {
      /*
       * Everything else failed, so just extend the nearest range.
       *
       * It is OK to do this because we always keep a local copy of the
       * host buffer data, for SW TNL, and the host never modifies the buffer.
       */

      assert(nearest_range < SVGA_BUFFER_MAX_RANGES);
      assert(nearest_range < sbuf->map.num_ranges);
      sbuf->map.ranges[nearest_range].start = MIN2(sbuf->map.ranges[nearest_range].start, start);
      sbuf->map.ranges[nearest_range].end = MAX2(sbuf->map.ranges[nearest_range].end, end);
   }
}
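
/*
 * A worked example of the coalescing above: given an existing range
 * [0, 64), a call with start = 64 and end = 96 yields
 * left_dist = 64 - 64 = 0, hence dist <= 0; the ranges are treated as
 * contiguous and the existing range simply grows to [0, 96).
 */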
/**
 * Copy the contents of the malloc buffer to a hardware buffer.
 */
static INLINE enum pipe_error
svga_buffer_update_hw(struct svga_screen *ss, struct svga_buffer *sbuf)
{
   assert(!sbuf->user);
   if(!sbuf->hwbuf) {
      enum pipe_error ret;
      void *map;

      assert(sbuf->swbuf);
      if(!sbuf->swbuf)
         return PIPE_ERROR;

      ret = svga_buffer_create_hw_storage(ss, sbuf);
      if(ret != PIPE_OK)
         return ret;

      pipe_mutex_lock(ss->swc_mutex);
      map = ss->sws->buffer_map(ss->sws, sbuf->hwbuf, PIPE_TRANSFER_WRITE);
      assert(map);
      if(!map) {
         pipe_mutex_unlock(ss->swc_mutex);
         svga_buffer_destroy_hw_storage(ss, sbuf);
         return PIPE_ERROR;
      }

      memcpy(map, sbuf->swbuf, sbuf->b.b.width0);
      ss->sws->buffer_unmap(ss->sws, sbuf->hwbuf);

      /* This user/malloc buffer is now indistinguishable from a GPU buffer */
      assert(!sbuf->map.count);
      if(!sbuf->map.count) {
         if(sbuf->user)
            sbuf->user = FALSE;
         else
            align_free(sbuf->swbuf);
         sbuf->swbuf = NULL;
      }

      pipe_mutex_unlock(ss->swc_mutex);
   }

   return PIPE_OK;
}

/**
 * Upload the buffer to the host in a piecewise fashion.
 *
 * Used when the buffer is too big to fit in the GMR aperture.
 */
static INLINE enum pipe_error
svga_buffer_upload_piecewise(struct svga_screen *ss,
                             struct svga_context *svga,
                             struct svga_buffer *sbuf)
{
   struct svga_winsys_screen *sws = ss->sws;
   const unsigned alignment = sizeof(void *);
   const unsigned usage = 0;
   unsigned i;

   assert(sbuf->map.num_ranges);
   assert(!sbuf->dma.pending);

   SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);

   for (i = 0; i < sbuf->map.num_ranges; ++i) {
      struct svga_buffer_range *range = &sbuf->map.ranges[i];
      unsigned offset = range->start;
      unsigned size = range->end - range->start;

      while (offset < range->end) {
         struct svga_winsys_buffer *hwbuf;
         uint8_t *map;
         enum pipe_error ret;

         if (offset + size > range->end)
            size = range->end - offset;

         /* Back off exponentially if the piece doesn't fit in the GMR
          * aperture: halve the size until allocation succeeds or the
          * size reaches zero.
          */
         hwbuf = sws->buffer_create(sws, alignment, usage, size);
         while (!hwbuf) {
            size /= 2;
            if (!size)
               return PIPE_ERROR_OUT_OF_MEMORY;
            hwbuf = sws->buffer_create(sws, alignment, usage, size);
         }

         SVGA_DBG(DEBUG_DMA, "  bytes %u - %u\n",
                  offset, offset + size);

         map = sws->buffer_map(sws, hwbuf,
                               PIPE_TRANSFER_WRITE |
                               PIPE_TRANSFER_DISCARD);
         assert(map);
         if (map) {
            /* Copy from the right offset within the malloc buffer, not
             * always from its start.
             */
            memcpy(map, (uint8_t *)sbuf->swbuf + offset, size);
            sws->buffer_unmap(sws, hwbuf);
         }

         ret = SVGA3D_BufferDMA(svga->swc,
                                hwbuf, sbuf->handle,
                                SVGA3D_WRITE_HOST_VRAM,
                                size, 0, offset, sbuf->dma.flags);
         if(ret != PIPE_OK) {
            svga_context_flush(svga, NULL);
            ret = SVGA3D_BufferDMA(svga->swc,
                                   hwbuf, sbuf->handle,
                                   SVGA3D_WRITE_HOST_VRAM,
                                   size, 0, offset, sbuf->dma.flags);
            assert(ret == PIPE_OK);
         }

         sbuf->dma.flags.discard = FALSE;

         sws->buffer_destroy(sws, hwbuf);

         offset += size;
      }
   }

   sbuf->map.num_ranges = 0;

   return PIPE_OK;
}
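
/*
 * Note the flush-and-retry pattern above: if SVGA3D_BufferDMA fails,
 * presumably because the command buffer is full, the context is flushed
 * and the command is emitted once more, which is then expected to succeed.
 */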


/**
 * Get (or create/upload) the winsys surface handle so that we can
 * refer to this buffer in fifo commands.
 */
struct svga_winsys_surface *
svga_buffer_handle(struct svga_context *svga,
                   struct pipe_resource *buf)
{
   struct pipe_screen *screen = svga->pipe.screen;
   struct svga_screen *ss = svga_screen(screen);
   struct svga_buffer *sbuf;
   enum pipe_error ret;

   if(!buf)
      return NULL;

   sbuf = svga_buffer(buf);

   assert(!sbuf->map.count);
   assert(!sbuf->user);

   if(!sbuf->handle) {
      ret = svga_buffer_create_host_surface(ss, sbuf);
      if(ret != PIPE_OK)
         return NULL;
   }

   assert(sbuf->handle);

   if (sbuf->map.num_ranges) {
      if (!sbuf->dma.pending) {
         /*
          * No pending DMA upload yet, so insert a DMA upload command now.
          */

         /*
          * Migrate the data from swbuf -> hwbuf if necessary.
          */
         ret = svga_buffer_update_hw(ss, sbuf);
         if (ret == PIPE_OK) {
            /*
             * Queue a dma command.
             */

            ret = svga_buffer_upload_command(svga, sbuf);
            if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
               svga_context_flush(svga, NULL);
               ret = svga_buffer_upload_command(svga, sbuf);
               assert(ret == PIPE_OK);
            }
            if (ret == PIPE_OK) {
               sbuf->dma.pending = TRUE;
               assert(!sbuf->head.prev && !sbuf->head.next);
               LIST_ADDTAIL(&sbuf->head, &svga->dirty_buffers);
            }
         }
         else if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
            /*
             * The buffer is too big to fit in the GMR aperture, so break it
             * into smaller pieces.
             */
            ret = svga_buffer_upload_piecewise(ss, svga, sbuf);
         }

         if (ret != PIPE_OK) {
            /*
             * Something unexpected happened above. There is very little that
             * we can do other than proceeding while ignoring the dirty ranges.
             */
            assert(0);
            sbuf->map.num_ranges = 0;
         }
      }
      else {
         /*
          * There is a pending DMA already. Make sure it is from this context.
          */
         assert(sbuf->dma.svga == svga);
      }
   }

   assert(!sbuf->map.num_ranges || sbuf->dma.pending);

   return sbuf->handle;
}
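
/*
 * Typical use (a sketch; 'vb' stands for some hypothetical vertex buffer
 * binding owned by the caller):
 *
 *    struct svga_winsys_surface *handle =
 *       svga_buffer_handle(svga, vb->buffer);
 *    if (!handle)
 *       return PIPE_ERROR_OUT_OF_MEMORY;
 *
 * Any dirty ranges are scheduled for upload as a side effect, so the
 * returned sid is safe to reference from subsequent FIFO commands.
 */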
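/**
 * Patch up and commit every pending DMA upload queued on this context's
 * dirty-buffer list.  Called as part of flushing the context.
 */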
void
svga_context_flush_buffers(struct svga_context *svga)
{
   struct list_head *curr, *next;
   struct svga_buffer *sbuf;

   curr = svga->dirty_buffers.next;
   next = curr->next;
   while(curr != &svga->dirty_buffers) {
      sbuf = LIST_ENTRY(struct svga_buffer, curr, head);

      assert(p_atomic_read(&sbuf->b.b.reference.count) != 0);
      assert(sbuf->dma.pending);

      /* This unlinks sbuf from the dirty list, which is why the next
       * pointer is saved ahead of time.
       */
      svga_buffer_upload_flush(svga, sbuf);

      curr = next;
      next = curr->next;
   }
}
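/**
 * Implements the context's redefine_user_buffer callback.  The contents
 * of user buffers are treated as volatile, so this is deliberately a
 * no-op (see the comment in the body).
 */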
void
svga_redefine_user_buffer(struct pipe_context *pipe,
                          struct pipe_resource *resource,
                          unsigned offset,
                          unsigned size)
{
   struct svga_buffer *sbuf = svga_buffer(resource);

   assert(sbuf->user);
   assert(!sbuf->dma.pending);
   assert(!sbuf->handle);
   assert(!sbuf->hwbuf);

   /*
    * We always treat the contents of user buffers as volatile,
    * so no particular action is needed here.
    */
}