svga: Add a more elaborate format compatibility determination v2
[mesa.git] / src / gallium / drivers / svga / svga_resource_texture.c
/**********************************************************
 * Copyright 2008-2009 VMware, Inc.  All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "svga3d_reg.h"
#include "svga3d_surfacedefs.h"

#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "os/os_thread.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_resource.h"
#include "util/u_upload_mgr.h"

#include "svga_cmd.h"
#include "svga_format.h"
#include "svga_screen.h"
#include "svga_context.h"
#include "svga_resource_texture.h"
#include "svga_resource_buffer.h"
#include "svga_sampler_view.h"
#include "svga_winsys.h"
#include "svga_debug.h"


static void
svga_transfer_dma_band(struct svga_context *svga,
                       struct svga_transfer *st,
                       SVGA3dTransferType transfer,
                       unsigned x, unsigned y, unsigned z,
                       unsigned w, unsigned h, unsigned d,
                       unsigned srcx, unsigned srcy, unsigned srcz,
                       SVGA3dSurfaceDMAFlags flags)
{
   struct svga_texture *texture = svga_texture(st->base.resource);
   SVGA3dCopyBox box;
   enum pipe_error ret;

   assert(!st->use_direct_map);

   box.x = x;
   box.y = y;
   box.z = z;
   box.w = w;
   box.h = h;
   box.d = d;
   box.srcx = srcx;
   box.srcy = srcy;
   box.srcz = srcz;

   SVGA_DBG(DEBUG_DMA, "dma %s sid %p, face %u, (%u, %u, %u) - "
            "(%u, %u, %u), %ubpp\n",
            transfer == SVGA3D_WRITE_HOST_VRAM ? "to" : "from",
            texture->handle,
            st->slice,
            x,
            y,
            z,
            x + w,
            y + h,
            z + 1,
            util_format_get_blocksize(texture->b.b.format) * 8 /
            (util_format_get_blockwidth(texture->b.b.format)
             * util_format_get_blockheight(texture->b.b.format)));

   ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
      assert(ret == PIPE_OK);
   }
}


static void
svga_transfer_dma(struct svga_context *svga,
                  struct svga_transfer *st,
                  SVGA3dTransferType transfer,
                  SVGA3dSurfaceDMAFlags flags)
{
   struct svga_texture *texture = svga_texture(st->base.resource);
   struct svga_screen *screen = svga_screen(texture->b.b.screen);
   struct svga_winsys_screen *sws = screen->sws;
   struct pipe_fence_handle *fence = NULL;

   assert(!st->use_direct_map);

   if (transfer == SVGA3D_READ_HOST_VRAM) {
      SVGA_DBG(DEBUG_PERF, "%s: readback transfer\n", __FUNCTION__);
   }

   /* Ensure any pending operations on host surfaces are queued on the
    * command buffer first.
    */
   svga_surfaces_flush(svga);

   if (!st->swbuf) {
      /* Do the DMA transfer in a single go */
      svga_transfer_dma_band(svga, st, transfer,
                             st->base.box.x, st->base.box.y, st->base.box.z,
                             st->base.box.width, st->base.box.height,
                             st->base.box.depth,
                             0, 0, 0,
                             flags);

      if (transfer == SVGA3D_READ_HOST_VRAM) {
         svga_context_flush(svga, &fence);
         sws->fence_finish(sws, fence, 0);
         sws->fence_reference(sws, &fence, NULL);
      }
   }
   else {
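      /* The transfer didn't fit in a single hardware buffer, so stage the
       * data through the malloc'd swbuf and DMA it in horizontal bands of
       * st->hw_nblocksy block rows each.
       */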
      int y, h, srcy;
      unsigned blockheight =
         util_format_get_blockheight(st->base.resource->format);

      h = st->hw_nblocksy * blockheight;
      srcy = 0;

      for (y = 0; y < st->base.box.height; y += h) {
         unsigned offset, length;
         void *hw, *sw;

         if (y + h > st->base.box.height)
            h = st->base.box.height - y;

         /* Transfer band must be aligned to pixel block boundaries */
         assert(y % blockheight == 0);
         assert(h % blockheight == 0);

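         /* st->base.stride is the size in bytes of one row of blocks, while
          * y and h are in pixels, so divide by blockheight to convert them
          * to block rows.  As a purely illustrative example, for a 4x4
          * block-compressed format, a band starting at y = 8 begins two
          * block rows (2 * stride bytes) into the staging buffer.
          */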
         offset = y * st->base.stride / blockheight;
         length = h * st->base.stride / blockheight;

         sw = (uint8_t *) st->swbuf + offset;

         if (transfer == SVGA3D_WRITE_HOST_VRAM) {
            unsigned usage = PIPE_TRANSFER_WRITE;

            /* Wait for the previous DMAs to complete */
            /* TODO: keep one DMA (at half the size) in the background */
            if (y) {
               svga_context_flush(svga, NULL);
               usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
            }

            hw = sws->buffer_map(sws, st->hwbuf, usage);
            assert(hw);
            if (hw) {
               memcpy(hw, sw, length);
               sws->buffer_unmap(sws, st->hwbuf);
            }
         }

         svga_transfer_dma_band(svga, st, transfer,
                                st->base.box.x, y, st->base.box.z,
                                st->base.box.width, h, st->base.box.depth,
                                0, srcy, 0, flags);

         /*
          * Prevent the texture contents from being discarded on the next
          * band upload.
          */
         flags.discard = FALSE;

         if (transfer == SVGA3D_READ_HOST_VRAM) {
            svga_context_flush(svga, &fence);
            sws->fence_finish(sws, fence, 0);

            hw = sws->buffer_map(sws, st->hwbuf, PIPE_TRANSFER_READ);
            assert(hw);
            if (hw) {
               memcpy(sw, hw, length);
               sws->buffer_unmap(sws, st->hwbuf);
            }
         }
      }
   }
}


static boolean
svga_texture_get_handle(struct pipe_screen *screen,
                        struct pipe_resource *texture,
                        struct winsys_handle *whandle)
{
   struct svga_winsys_screen *sws = svga_winsys_screen(texture->screen);
   unsigned stride;

   assert(svga_texture(texture)->key.cachable == 0);
   svga_texture(texture)->key.cachable = 0;

   stride = util_format_get_nblocksx(texture->format, texture->width0) *
            util_format_get_blocksize(texture->format);

   return sws->surface_get_handle(sws, svga_texture(texture)->handle,
                                  stride, whandle);
}


static void
svga_texture_destroy(struct pipe_screen *screen,
                     struct pipe_resource *pt)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_texture *tex = svga_texture(pt);

   ss->texture_timestamp++;

   svga_sampler_view_reference(&tex->cached_view, NULL);

   /*
   DBG("%s deleting %p\n", __FUNCTION__, (void *) tex);
   */
   SVGA_DBG(DEBUG_DMA, "unref sid %p (texture)\n", tex->handle);
   svga_screen_surface_destroy(ss, &tex->key, &tex->handle);

   /* Destroy the backed surface handle if it exists */
   if (tex->backed_handle)
      svga_screen_surface_destroy(ss, &tex->backed_key, &tex->backed_handle);

   ss->hud.total_resource_bytes -= tex->size;

   FREE(tex->defined);
   FREE(tex->rendered_to);
   FREE(tex->dirty);
   FREE(tex);

   assert(ss->hud.num_resources > 0);
   if (ss->hud.num_resources > 0)
      ss->hud.num_resources--;
}


/**
 * Determine if the resource was rendered to
 */
static inline boolean
was_tex_rendered_to(struct pipe_resource *resource,
                    const struct pipe_transfer *transfer)
{
   unsigned face;

   if (resource->target == PIPE_TEXTURE_CUBE) {
      assert(transfer->box.depth == 1);
      face = transfer->box.z;
   }
   else {
      face = 0;
   }

   return svga_was_texture_rendered_to(svga_texture(resource),
                                       face, transfer->level);
}


/**
 * Determine if we need to read back a texture image before mapping it.
 */
static inline boolean
need_tex_readback(struct pipe_transfer *transfer)
{
   if (transfer->usage & PIPE_TRANSFER_READ)
      return TRUE;

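   /* A write that doesn't discard the whole resource is effectively a
    * read-modify-write, so the current contents must be read back if the
    * texture was previously rendered to on the host.
    */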
   if ((transfer->usage & PIPE_TRANSFER_WRITE) &&
       ((transfer->usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) == 0)) {
      return was_tex_rendered_to(transfer->resource, transfer);
   }

   return FALSE;
}


static enum pipe_error
readback_image_vgpu9(struct svga_context *svga,
                     struct svga_winsys_surface *surf,
                     unsigned slice,
                     unsigned level)
{
   enum pipe_error ret;

   ret = SVGA3D_ReadbackGBImage(svga->swc, surf, slice, level);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_ReadbackGBImage(svga->swc, surf, slice, level);
   }
   return ret;
}


static enum pipe_error
readback_image_vgpu10(struct svga_context *svga,
                      struct svga_winsys_surface *surf,
                      unsigned slice,
                      unsigned level,
                      unsigned numMipLevels)
{
   enum pipe_error ret;
   unsigned subResource;

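   /* VGPU10 addresses images with a flat subresource index: all mip levels
    * of array slice 0 come first, then the mip levels of slice 1, and so on.
    */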
   subResource = slice * numMipLevels + level;
   ret = SVGA3D_vgpu10_ReadbackSubResource(svga->swc, surf, subResource);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_ReadbackSubResource(svga->swc, surf, subResource);
   }
   return ret;
}


/**
 * Use DMA for the transfer request
 */
static void *
svga_texture_transfer_map_dma(struct svga_context *svga,
                              struct svga_transfer *st)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   struct pipe_resource *texture = st->base.resource;
   unsigned nblocksx, nblocksy;
   unsigned d;
   unsigned usage = st->base.usage;

   /* we'll put the data into a tightly packed buffer */
   nblocksx = util_format_get_nblocksx(texture->format, st->base.box.width);
   nblocksy = util_format_get_nblocksy(texture->format, st->base.box.height);
   d = st->base.box.depth;

   st->base.stride = nblocksx * util_format_get_blocksize(texture->format);
   st->base.layer_stride = st->base.stride * nblocksy;
   st->hw_nblocksy = nblocksy;

   st->hwbuf = svga_winsys_buffer_create(svga, 1, 0,
                                         st->hw_nblocksy * st->base.stride * d);

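   /* If the allocation failed, progressively halve the band height until a
    * hardware buffer fits; the transfer is then split into multiple
    * band-sized DMAs, staged through the malloc'd buffer allocated below.
    */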
   while (!st->hwbuf && (st->hw_nblocksy /= 2)) {
      st->hwbuf =
         svga_winsys_buffer_create(svga, 1, 0,
                                   st->hw_nblocksy * st->base.stride * d);
   }

   if (!st->hwbuf)
      return NULL;

   if (st->hw_nblocksy < nblocksy) {
      /* We couldn't allocate a hardware buffer big enough for the transfer,
       * so allocate regular malloc memory instead
       */
      if (0) {
         debug_printf("%s: failed to allocate %u KB of DMA, "
                      "splitting into %u x %u KB DMA transfers\n",
                      __FUNCTION__,
                      (nblocksy * st->base.stride + 1023) / 1024,
                      (nblocksy + st->hw_nblocksy - 1) / st->hw_nblocksy,
                      (st->hw_nblocksy * st->base.stride + 1023) / 1024);
      }

      st->swbuf = MALLOC(nblocksy * st->base.stride * d);
      if (!st->swbuf) {
         sws->buffer_destroy(sws, st->hwbuf);
         return NULL;
      }
   }

   if (usage & PIPE_TRANSFER_READ) {
      SVGA3dSurfaceDMAFlags flags;
      memset(&flags, 0, sizeof flags);
      svga_transfer_dma(svga, st, SVGA3D_READ_HOST_VRAM, flags);
   }

   if (st->swbuf) {
      return st->swbuf;
   }
   else {
      return sws->buffer_map(sws, st->hwbuf, usage);
   }
}


/**
 * Use direct map for the transfer request
 */
static void *
svga_texture_transfer_map_direct(struct svga_context *svga,
                                 struct svga_transfer *st)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   struct pipe_transfer *transfer = &st->base;
   struct pipe_resource *texture = transfer->resource;
   struct svga_texture *tex = svga_texture(texture);
   struct svga_winsys_surface *surf = tex->handle;
   unsigned level = st->base.level;
   unsigned w, h, nblocksx, nblocksy;
   unsigned usage = st->base.usage;

   if (need_tex_readback(transfer)) {
      enum pipe_error ret;

      svga_surfaces_flush(svga);

      if (svga_have_vgpu10(svga)) {
         ret = readback_image_vgpu10(svga, surf, st->slice, level,
                                     tex->b.b.last_level + 1);
      } else {
         ret = readback_image_vgpu9(svga, surf, st->slice, level);
      }

      svga->hud.num_readbacks++;
      SVGA_STATS_COUNT_INC(sws, SVGA_STATS_COUNT_TEXREADBACK);

      assert(ret == PIPE_OK);
      (void) ret;

      svga_context_flush(svga, NULL);
      /*
       * Note: if PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE were specified
       * we could potentially clear the flag for all faces/layers/mips.
       */
      svga_clear_texture_rendered_to(tex, st->slice, level);
   }
   else {
      assert(usage & PIPE_TRANSFER_WRITE);
      if ((usage & PIPE_TRANSFER_UNSYNCHRONIZED) == 0) {
         if (svga_is_texture_dirty(tex, st->slice, level)) {
            /*
             * Do a surface flush if the subresource has been modified
             * in this command buffer.
             */
            svga_surfaces_flush(svga);
            if (!sws->surface_is_flushed(sws, surf)) {
               svga->hud.surface_write_flushes++;
               SVGA_STATS_COUNT_INC(sws, SVGA_STATS_COUNT_SURFACEWRITEFLUSH);
               svga_context_flush(svga, NULL);
            }
         }
      }
   }

   /* we'll directly access the guest-backed surface */
   w = u_minify(texture->width0, level);
   h = u_minify(texture->height0, level);
   nblocksx = util_format_get_nblocksx(texture->format, w);
   nblocksy = util_format_get_nblocksy(texture->format, h);
   st->hw_nblocksy = nblocksy;
   st->base.stride = nblocksx * util_format_get_blocksize(texture->format);
   st->base.layer_stride = st->base.stride * nblocksy;

   /*
    * Begin mapping code
    */
   {
      SVGA3dSize baseLevelSize;
      uint8_t *map;
      boolean retry;
      unsigned offset, mip_width, mip_height;

      map = svga->swc->surface_map(svga->swc, surf, usage, &retry);
      if (map == NULL && retry) {
         /*
          * At this point, svga_surfaces_flush() should already have been
          * called in svga_texture_get_transfer().
          */
         svga->hud.surface_write_flushes++;
         svga_context_flush(svga, NULL);
         map = svga->swc->surface_map(svga->swc, surf, usage, &retry);
      }

      /*
       * Make sure we return NULL if the map fails
       */
      if (!map) {
         return NULL;
      }

      /*
       * Compute the offset to the specific texture slice in the buffer.
       */
      baseLevelSize.width = tex->b.b.width0;
      baseLevelSize.height = tex->b.b.height0;
      baseLevelSize.depth = tex->b.b.depth0;

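      /* For array textures, one layer's worth of data is a complete mip
       * chain, so the layer stride equals the offset of the image at
       * (slice 1, level 0).
       */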
      if ((tex->b.b.target == PIPE_TEXTURE_1D_ARRAY) ||
          (tex->b.b.target == PIPE_TEXTURE_2D_ARRAY)) {
         st->base.layer_stride =
            svga3dsurface_get_image_offset(tex->key.format, baseLevelSize,
                                           tex->b.b.last_level + 1, 1, 0);
      }

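      /* Offset of the (slice, level) image within the mapped surface, plus
       * (below) the offset of the transfer box origin within that image.
       */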
      offset = svga3dsurface_get_image_offset(tex->key.format, baseLevelSize,
                                              tex->b.b.last_level + 1, /* numMips */
                                              st->slice, level);
      if (level > 0) {
         assert(offset > 0);
      }

      mip_width = u_minify(tex->b.b.width0, level);
      mip_height = u_minify(tex->b.b.height0, level);

      offset += svga3dsurface_get_pixel_offset(tex->key.format,
                                               mip_width, mip_height,
                                               st->base.box.x,
                                               st->base.box.y,
                                               st->base.box.z);
      return (void *) (map + offset);
   }
}


/**
 * Request a transfer map to the texture resource
 */
static void *
svga_texture_transfer_map(struct pipe_context *pipe,
                          struct pipe_resource *texture,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_winsys_screen *sws = svga_screen(pipe->screen)->sws;
   struct svga_texture *tex = svga_texture(texture);
   struct svga_transfer *st;
   struct svga_winsys_surface *surf = tex->handle;
   boolean use_direct_map = svga_have_gb_objects(svga) &&
                            !svga_have_gb_dma(svga);
   void *map = NULL;
   int64_t begin = svga_get_time(svga);

   SVGA_STATS_TIME_PUSH(sws, SVGA_STATS_TIME_TEXTRANSFERMAP);

   if (!surf)
      goto done;

   /* We can't map texture storage directly unless we have GB objects */
   if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
      if (svga_have_gb_objects(svga))
         use_direct_map = TRUE;
      else
         goto done;
   }

   st = CALLOC_STRUCT(svga_transfer);
   if (!st)
      goto done;

   st->base.level = level;
   st->base.usage = usage;
   st->base.box = *box;

   switch (tex->b.b.target) {
   case PIPE_TEXTURE_CUBE:
      st->slice = st->base.box.z;
      st->base.box.z = 0;   /* so we don't apply double offsets below */
      break;
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_1D_ARRAY:
      st->slice = st->base.box.z;
      st->base.box.z = 0;   /* so we don't apply double offsets below */

      /* Force direct map for transferring multiple slices */
      if (st->base.box.depth > 1)
         use_direct_map = svga_have_gb_objects(svga);

      break;
   default:
      st->slice = 0;
      break;
   }

   st->use_direct_map = use_direct_map;
   pipe_resource_reference(&st->base.resource, texture);

   /* If this is the first time mapping to the surface in this
    * command buffer, clear the dirty masks of this surface.
    */
   if (sws->surface_is_flushed(sws, surf)) {
      svga_clear_texture_dirty(tex);
   }

   if (!use_direct_map) {
      /* upload to the DMA buffer */
      map = svga_texture_transfer_map_dma(svga, st);
   }
   else {
      boolean can_use_upload = tex->can_use_upload &&
                               !(st->base.usage & PIPE_TRANSFER_READ);
      boolean was_rendered_to = was_tex_rendered_to(texture, &st->base);

      /* If the texture was already rendered to and the upload buffer is
       * supported, then we will use the upload buffer to avoid the need
       * to read back the texture content; otherwise, we'll first try to
       * map directly to the GB surface and, if that would block, try the
       * upload buffer.
       */
      if (was_rendered_to && can_use_upload) {
         map = svga_texture_transfer_map_upload(svga, st);
      }
      else {
         unsigned orig_usage = st->base.usage;

         /* First try directly mapping to the GB surface */
         if (can_use_upload)
            st->base.usage |= PIPE_TRANSFER_DONTBLOCK;
         map = svga_texture_transfer_map_direct(svga, st);
         st->base.usage = orig_usage;

         if (!map && can_use_upload) {
            /* If direct map with DONTBLOCK fails, then try uploading to
             * the texture upload buffer.
             */
            map = svga_texture_transfer_map_upload(svga, st);
         }
      }

      /* If upload fails, then try direct map again without forcing it
       * to DONTBLOCK.
       */
      if (!map) {
         map = svga_texture_transfer_map_direct(svga, st);
      }
   }

   if (!map) {
      FREE(st);
   }
   else {
      *ptransfer = &st->base;
      svga->hud.num_textures_mapped++;
      if (usage & PIPE_TRANSFER_WRITE) {
         /* record texture upload for HUD */
         svga->hud.num_bytes_uploaded +=
            st->base.layer_stride * st->base.box.depth;

         /* mark this texture level as dirty */
         svga_set_texture_dirty(tex, st->slice, level);
      }
   }

done:
   svga->hud.map_buffer_time += (svga_get_time(svga) - begin);
   SVGA_STATS_TIME_POP(sws);
   (void) sws;

   return map;
}


/**
 * Unmap a GB texture surface.
 */
static void
svga_texture_surface_unmap(struct svga_context *svga,
                           struct pipe_transfer *transfer)
{
   struct svga_winsys_surface *surf = svga_texture(transfer->resource)->handle;
   struct svga_winsys_context *swc = svga->swc;
   boolean rebind;

   assert(surf);

   swc->surface_unmap(swc, surf, &rebind);
   if (rebind) {
      enum pipe_error ret;
      ret = SVGA3D_BindGBSurface(swc, surf);
      if (ret != PIPE_OK) {
         /* flush and retry */
         svga_context_flush(svga, NULL);
         ret = SVGA3D_BindGBSurface(swc, surf);
         assert(ret == PIPE_OK);
      }
   }
}


static enum pipe_error
update_image_vgpu9(struct svga_context *svga,
                   struct svga_winsys_surface *surf,
                   const SVGA3dBox *box,
                   unsigned slice,
                   unsigned level)
{
   enum pipe_error ret;

   ret = SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level);
   }
   return ret;
}


static enum pipe_error
update_image_vgpu10(struct svga_context *svga,
                    struct svga_winsys_surface *surf,
                    const SVGA3dBox *box,
                    unsigned slice,
                    unsigned level,
                    unsigned numMipLevels)
{
   enum pipe_error ret;
   unsigned subResource;

   subResource = slice * numMipLevels + level;
   ret = SVGA3D_vgpu10_UpdateSubResource(svga->swc, surf, box, subResource);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_UpdateSubResource(svga->swc, surf, box, subResource);
   }
   return ret;
}


/**
 * Unmap a DMA transfer request
 */
static void
svga_texture_transfer_unmap_dma(struct svga_context *svga,
                                struct svga_transfer *st)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;

   if (st->hwbuf)
      sws->buffer_unmap(sws, st->hwbuf);

   if (st->base.usage & PIPE_TRANSFER_WRITE) {
      /* Use DMA to transfer texture data */
      SVGA3dSurfaceDMAFlags flags;

      memset(&flags, 0, sizeof flags);
      if (st->base.usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
         flags.discard = TRUE;
      }
      if (st->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
         flags.unsynchronized = TRUE;
      }

      svga_transfer_dma(svga, st, SVGA3D_WRITE_HOST_VRAM, flags);
   }

   FREE(st->swbuf);
   sws->buffer_destroy(sws, st->hwbuf);
}


/**
 * Unmap a direct map transfer request
 */
static void
svga_texture_transfer_unmap_direct(struct svga_context *svga,
                                   struct svga_transfer *st)
{
   struct pipe_transfer *transfer = &st->base;
   struct svga_texture *tex = svga_texture(transfer->resource);

   svga_texture_surface_unmap(svga, transfer);

   /* Now send an update command to update the content in the backend. */
   if (st->base.usage & PIPE_TRANSFER_WRITE) {
      struct svga_winsys_surface *surf = tex->handle;
      SVGA3dBox box;
      enum pipe_error ret;
      unsigned nlayers = 1;

      assert(svga_have_gb_objects(svga));

      /* update the affected region */
      box.x = transfer->box.x;
      box.y = transfer->box.y;
      box.w = transfer->box.width;
      box.h = transfer->box.height;
      box.d = transfer->box.depth;

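      /* box.z was folded into st->slice at map time for cube and array
       * textures, so adjust the box here: a cube face updates a single 2D
       * image, while array textures update one image per layer below.
       */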
      switch (tex->b.b.target) {
      case PIPE_TEXTURE_CUBE:
         box.z = 0;
         break;
      case PIPE_TEXTURE_2D_ARRAY:
         nlayers = box.d;
         box.z = 0;
         box.d = 1;
         break;
      case PIPE_TEXTURE_1D_ARRAY:
         nlayers = box.d;
         box.y = box.z = 0;
         box.d = 1;
         break;
      default:
         box.z = transfer->box.z;
         break;
      }

      if (0)
         debug_printf("%s %d, %d, %d %d x %d x %d\n",
                      __FUNCTION__,
                      box.x, box.y, box.z,
                      box.w, box.h, box.d);

      if (svga_have_vgpu10(svga)) {
         unsigned i;
         for (i = 0; i < nlayers; i++) {
            ret = update_image_vgpu10(svga, surf, &box,
                                      st->slice + i, transfer->level,
                                      tex->b.b.last_level + 1);
            assert(ret == PIPE_OK);
         }
      } else {
         assert(nlayers == 1);
         ret = update_image_vgpu9(svga, surf, &box, st->slice, transfer->level);
         assert(ret == PIPE_OK);
      }
      (void) ret;
   }
}


static void
svga_texture_transfer_unmap(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_transfer *st = svga_transfer(transfer);
   struct svga_texture *tex = svga_texture(transfer->resource);

   SVGA_STATS_TIME_PUSH(sws, SVGA_STATS_TIME_TEXTRANSFERUNMAP);

   if (!st->use_direct_map) {
      svga_texture_transfer_unmap_dma(svga, st);
   }
   else if (st->upload.buf) {
      svga_texture_transfer_unmap_upload(svga, st);
   }
   else {
      svga_texture_transfer_unmap_direct(svga, st);
   }

   if (st->base.usage & PIPE_TRANSFER_WRITE) {
      svga->hud.num_resource_updates++;

      /* Mark the texture level as dirty */
      ss->texture_timestamp++;
      svga_age_texture_view(tex, transfer->level);
      if (transfer->resource->target == PIPE_TEXTURE_CUBE)
         svga_define_texture_level(tex, st->slice, transfer->level);
      else
         svga_define_texture_level(tex, 0, transfer->level);
   }

   pipe_resource_reference(&st->base.resource, NULL);
   FREE(st);
   SVGA_STATS_TIME_POP(sws);
   (void) sws;
}


/**
 * Does the format store depth values?
 */
static inline boolean
format_has_depth(enum pipe_format format)
{
   const struct util_format_description *desc = util_format_description(format);
   return util_format_has_depth(desc);
}


struct u_resource_vtbl svga_texture_vtbl =
{
   svga_texture_get_handle,             /* get_handle */
   svga_texture_destroy,                /* resource_destroy */
   svga_texture_transfer_map,           /* transfer_map */
   u_default_transfer_flush_region,     /* transfer_flush_region */
   svga_texture_transfer_unmap,         /* transfer_unmap */
};


struct pipe_resource *
svga_texture_create(struct pipe_screen *screen,
                    const struct pipe_resource *template)
{
   struct svga_screen *svgascreen = svga_screen(screen);
   struct svga_texture *tex;
   unsigned bindings = template->bind;

   SVGA_STATS_TIME_PUSH(svgascreen->sws,
                        SVGA_STATS_TIME_CREATETEXTURE);

   assert(template->last_level < SVGA_MAX_TEXTURE_LEVELS);
   if (template->last_level >= SVGA_MAX_TEXTURE_LEVELS) {
      goto fail_notex;
   }

   tex = CALLOC_STRUCT(svga_texture);
   if (!tex) {
      goto fail_notex;
   }

   tex->defined = CALLOC(template->depth0 * template->array_size,
                         sizeof(tex->defined[0]));
   if (!tex->defined) {
      FREE(tex);
      goto fail_notex;
   }

   tex->rendered_to = CALLOC(template->depth0 * template->array_size,
                             sizeof(tex->rendered_to[0]));
   if (!tex->rendered_to) {
      goto fail;
   }

   tex->dirty = CALLOC(template->depth0 * template->array_size,
                       sizeof(tex->dirty[0]));
   if (!tex->dirty) {
      goto fail;
   }

   tex->b.b = *template;
   tex->b.vtbl = &svga_texture_vtbl;
   pipe_reference_init(&tex->b.b.reference, 1);
   tex->b.b.screen = screen;

   tex->key.flags = 0;
   tex->key.size.width = template->width0;
   tex->key.size.height = template->height0;
   tex->key.size.depth = template->depth0;
   tex->key.arraySize = 1;
   tex->key.numFaces = 1;

   /* a single-sample texture can be treated as a non-multisampled texture */
   tex->key.sampleCount = template->nr_samples > 1 ? template->nr_samples : 0;

   if (template->nr_samples > 1) {
      tex->key.flags |= SVGA3D_SURFACE_MASKABLE_ANTIALIAS;
   }

   if (svgascreen->sws->have_vgpu10) {
      switch (template->target) {
      case PIPE_TEXTURE_1D:
         tex->key.flags |= SVGA3D_SURFACE_1D;
         break;
      case PIPE_TEXTURE_1D_ARRAY:
         tex->key.flags |= SVGA3D_SURFACE_1D;
         /* fall-through */
      case PIPE_TEXTURE_2D_ARRAY:
         tex->key.flags |= SVGA3D_SURFACE_ARRAY;
         tex->key.arraySize = template->array_size;
         break;
      case PIPE_TEXTURE_3D:
         tex->key.flags |= SVGA3D_SURFACE_VOLUME;
         break;
      case PIPE_TEXTURE_CUBE:
         tex->key.flags |= (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_ARRAY);
         tex->key.numFaces = 6;
         break;
      default:
         break;
      }
   }
   else {
      switch (template->target) {
      case PIPE_TEXTURE_3D:
         tex->key.flags |= SVGA3D_SURFACE_VOLUME;
         break;
      case PIPE_TEXTURE_CUBE:
         tex->key.flags |= SVGA3D_SURFACE_CUBEMAP;
         tex->key.numFaces = 6;
         break;
      default:
         break;
      }
   }

   tex->key.cachable = 1;

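   /* Expand the requested bindings with any additional bindings the format
    * supports, presumably so that the cached surface can later be reused
    * under bind flags the state tracker didn't request at creation time.
    */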
   if ((bindings & (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL)) &&
       !(bindings & PIPE_BIND_SAMPLER_VIEW)) {
      /* Also check if the format can be sampled from */
      if (screen->is_format_supported(screen, template->format,
                                      template->target,
                                      template->nr_samples,
                                      PIPE_BIND_SAMPLER_VIEW)) {
         bindings |= PIPE_BIND_SAMPLER_VIEW;
      }
   }

   if (bindings & PIPE_BIND_SAMPLER_VIEW) {
      tex->key.flags |= SVGA3D_SURFACE_HINT_TEXTURE;
      tex->key.flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE;

      if (!(bindings & PIPE_BIND_RENDER_TARGET)) {
         /* Also check if the format is color renderable */
         if (screen->is_format_supported(screen, template->format,
                                         template->target,
                                         template->nr_samples,
                                         PIPE_BIND_RENDER_TARGET)) {
            bindings |= PIPE_BIND_RENDER_TARGET;
         }
      }

      if (!(bindings & PIPE_BIND_DEPTH_STENCIL)) {
         /* Also check if the format is depth/stencil renderable */
         if (screen->is_format_supported(screen, template->format,
                                         template->target,
                                         template->nr_samples,
                                         PIPE_BIND_DEPTH_STENCIL)) {
            bindings |= PIPE_BIND_DEPTH_STENCIL;
         }
      }
   }

   if (bindings & PIPE_BIND_DISPLAY_TARGET) {
      tex->key.cachable = 0;
   }

   if (bindings & PIPE_BIND_SHARED) {
      tex->key.cachable = 0;
   }

   if (bindings & (PIPE_BIND_SCANOUT | PIPE_BIND_CURSOR)) {
      tex->key.scanout = 1;
      tex->key.cachable = 0;
   }

   /*
    * Note: Previously we never passed the SVGA3D_SURFACE_HINT_RENDERTARGET
    * hint.  Mesa cannot know beforehand whether a texture will be used as
    * a rendertarget or not and it always requests PIPE_BIND_RENDER_TARGET,
    * therefore passing the SVGA3D_SURFACE_HINT_RENDERTARGET here would
    * defeat its purpose.
    *
    * However, this was changed because other state trackers (XA, for
    * example) use it accurately and certain device versions rely on it
    * in certain situations to render correctly.
    */
   if ((bindings & PIPE_BIND_RENDER_TARGET) &&
       !util_format_is_s3tc(template->format)) {
      tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
      tex->key.flags |= SVGA3D_SURFACE_BIND_RENDER_TARGET;
   }

   if (bindings & PIPE_BIND_DEPTH_STENCIL) {
      tex->key.flags |= SVGA3D_SURFACE_HINT_DEPTHSTENCIL;
      tex->key.flags |= SVGA3D_SURFACE_BIND_DEPTH_STENCIL;
   }

   tex->key.numMipLevels = template->last_level + 1;

   tex->key.format = svga_translate_format(svgascreen, template->format,
                                           bindings);
   if (tex->key.format == SVGA3D_FORMAT_INVALID) {
      goto fail;
   }

   /* Use typeless formats for sRGB and depth resources.  Typeless
    * formats can be reinterpreted as other formats.  For example,
    * SVGA3D_R8G8B8A8_UNORM_TYPELESS can be interpreted as
    * SVGA3D_R8G8B8A8_UNORM_SRGB or SVGA3D_R8G8B8A8_UNORM.
    */
   if (svgascreen->sws->have_vgpu10 &&
       (util_format_is_srgb(template->format) ||
        format_has_depth(template->format))) {
      SVGA3dSurfaceFormat typeless = svga_typeless_format(tex->key.format);
      if (0) {
         debug_printf("Convert resource type %s -> %s (bind 0x%x)\n",
                      svga_format_name(tex->key.format),
                      svga_format_name(typeless),
                      bindings);
      }

      if (svga_format_is_uncompressed_snorm(tex->key.format)) {
         /* We can't normally render to snorm surfaces, but once we
          * substitute a typeless format, we can if the rendertarget view
          * is unorm.  This can happen with GL_ARB_copy_image.
          */
         tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
         tex->key.flags |= SVGA3D_SURFACE_BIND_RENDER_TARGET;
      }

      tex->key.format = typeless;
   }

   SVGA_DBG(DEBUG_DMA, "surface_create for texture\n");
   tex->handle = svga_screen_surface_create(svgascreen, bindings,
                                            tex->b.b.usage,
                                            &tex->validated, &tex->key);
   if (!tex->handle) {
      goto fail;
   }

   SVGA_DBG(DEBUG_DMA, "  --> got sid %p (texture)\n", tex->handle);

   debug_reference(&tex->b.b.reference,
                   (debug_reference_descriptor)debug_describe_resource, 0);

   tex->size = util_resource_size(template);

   /* Determine if the texture upload buffer can be used to upload this
    * texture.
    */
   tex->can_use_upload = svga_texture_transfer_map_can_upload(svgascreen,
                                                              &tex->b.b);

   /* Initialize the backing resource cache */
   tex->backed_handle = NULL;

   svgascreen->hud.total_resource_bytes += tex->size;
   svgascreen->hud.num_resources++;

   SVGA_STATS_TIME_POP(svgascreen->sws);

   return &tex->b.b;

fail:
   if (tex->dirty)
      FREE(tex->dirty);
   if (tex->rendered_to)
      FREE(tex->rendered_to);
   if (tex->defined)
      FREE(tex->defined);
   FREE(tex);
fail_notex:
   SVGA_STATS_TIME_POP(svgascreen->sws);
   return NULL;
}


struct pipe_resource *
svga_texture_from_handle(struct pipe_screen *screen,
                         const struct pipe_resource *template,
                         struct winsys_handle *whandle)
{
   struct svga_winsys_screen *sws = svga_winsys_screen(screen);
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_surface *srf;
   struct svga_texture *tex;
   enum SVGA3dSurfaceFormat format = 0;

   assert(screen);

   /* Only 2D and RECT textures with a single level and depth are supported */
   if ((template->target != PIPE_TEXTURE_2D &&
        template->target != PIPE_TEXTURE_RECT) ||
       template->last_level != 0 ||
       template->depth0 != 1) {
      return NULL;
   }

   srf = sws->surface_from_handle(sws, whandle, &format);

   if (!srf)
      return NULL;

   if (!svga_format_is_shareable(ss, template->format, format,
                                 template->bind, true))
      goto out_unref;

   tex = CALLOC_STRUCT(svga_texture);
   if (!tex)
      goto out_unref;

   tex->defined = CALLOC(template->depth0 * template->array_size,
                         sizeof(tex->defined[0]));
   if (!tex->defined)
      goto out_no_defined;

   tex->b.b = *template;
   tex->b.vtbl = &svga_texture_vtbl;
   pipe_reference_init(&tex->b.b.reference, 1);
   tex->b.b.screen = screen;

   SVGA_DBG(DEBUG_DMA, "wrap surface sid %p\n", srf);

   tex->key.cachable = 0;
   tex->key.format = format;
   tex->handle = srf;

   tex->rendered_to = CALLOC(1, sizeof(tex->rendered_to[0]));
   if (!tex->rendered_to)
      goto out_no_rendered_to;

   tex->dirty = CALLOC(1, sizeof(tex->dirty[0]));
   if (!tex->dirty)
      goto out_no_dirty;

   tex->imported = TRUE;

   ss->hud.num_resources++;

   return &tex->b.b;

out_no_dirty:
   FREE(tex->rendered_to);
out_no_rendered_to:
   FREE(tex->defined);
out_no_defined:
   FREE(tex);
out_unref:
   sws->surface_reference(sws, &srf, NULL);
   return NULL;
}


boolean
svga_texture_generate_mipmap(struct pipe_context *pipe,
                             struct pipe_resource *pt,
                             enum pipe_format format,
                             unsigned base_level,
                             unsigned last_level,
                             unsigned first_layer,
                             unsigned last_layer)
{
   struct pipe_sampler_view templ, *psv;
   struct svga_pipe_sampler_view *sv;
   struct svga_context *svga = svga_context(pipe);
   struct svga_texture *tex = svga_texture(pt);
   enum pipe_error ret;

   assert(svga_have_vgpu10(svga));

   /* Only 2D textures are supported for now */
   if (pt->target != PIPE_TEXTURE_2D)
      return FALSE;

   /* Fall back to the mipmap generation utility for formats that do not
    * support hardware mipmap generation.
    */
   if (!svga_format_support_gen_mips(format))
      return FALSE;

   /* Make sure the texture surface was created with
    * SVGA3D_SURFACE_BIND_RENDER_TARGET
    */
   if (!tex->handle || !(tex->key.flags & SVGA3D_SURFACE_BIND_RENDER_TARGET))
      return FALSE;

   templ.format = format;
   templ.u.tex.first_layer = first_layer;
   templ.u.tex.last_layer = last_layer;
   templ.u.tex.first_level = base_level;
   templ.u.tex.last_level = last_level;

   psv = pipe->create_sampler_view(pipe, pt, &templ);
   if (psv == NULL)
      return FALSE;

   sv = svga_pipe_sampler_view(psv);
   ret = svga_validate_pipe_sampler_view(svga, sv);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = svga_validate_pipe_sampler_view(svga, sv);
      assert(ret == PIPE_OK);
   }

   ret = SVGA3D_vgpu10_GenMips(svga->swc, sv->id, tex->handle);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_GenMips(svga->swc, sv->id, tex->handle);
   }
   pipe_sampler_view_reference(&psv, NULL);

   svga->hud.num_generate_mipmap++;

   return TRUE;
}


/* texture upload buffer default size in bytes */
#define TEX_UPLOAD_DEFAULT_SIZE (1024 * 1024)

/**
 * Create a texture upload buffer
 */
boolean
svga_texture_transfer_map_upload_create(struct svga_context *svga)
{
   svga->tex_upload = u_upload_create(&svga->pipe, TEX_UPLOAD_DEFAULT_SIZE,
                                      0, PIPE_USAGE_STAGING);
   return svga->tex_upload != NULL;
}


/**
 * Destroy the texture upload buffer
 */
void
svga_texture_transfer_map_upload_destroy(struct svga_context *svga)
{
   u_upload_destroy(svga->tex_upload);
}


/**
 * Returns true if this transfer map request can use the upload buffer.
 */
boolean
svga_texture_transfer_map_can_upload(const struct svga_screen *svgascreen,
                                     const struct pipe_resource *texture)
{
   if (svgascreen->sws->have_transfer_from_buffer_cmd == FALSE)
      return FALSE;

   /* The TransferFromBuffer command is not well supported with multisample
    * surfaces.
    */
   if (texture->nr_samples > 1)
      return FALSE;

   if (util_format_is_compressed(texture->format)) {
      /* XXX Need to take a closer look to see why texture upload fails
       * for 3D textures with compressed formats.
       */
      if (texture->target == PIPE_TEXTURE_3D)
         return FALSE;
   }
   else if (texture->format == PIPE_FORMAT_R9G9B9E5_FLOAT) {
      return FALSE;
   }

   return TRUE;
}


/**
 * Use the upload buffer for the transfer map request.
 */
void *
svga_texture_transfer_map_upload(struct svga_context *svga,
                                 struct svga_transfer *st)
{
   struct pipe_resource *texture = st->base.resource;
   struct pipe_resource *tex_buffer = NULL;
   void *tex_map;
   unsigned nblocksx, nblocksy;
   unsigned offset;
   unsigned upload_size;

   assert(svga->tex_upload);

   st->upload.box.x = st->base.box.x;
   st->upload.box.y = st->base.box.y;
   st->upload.box.z = st->base.box.z;
   st->upload.box.w = st->base.box.width;
   st->upload.box.h = st->base.box.height;
   st->upload.box.d = st->base.box.depth;
   st->upload.nlayers = 1;

   switch (texture->target) {
   case PIPE_TEXTURE_CUBE:
      st->upload.box.z = 0;
      break;
   case PIPE_TEXTURE_2D_ARRAY:
      st->upload.nlayers = st->base.box.depth;
      st->upload.box.z = 0;
      st->upload.box.d = 1;
      break;
   case PIPE_TEXTURE_1D_ARRAY:
      st->upload.nlayers = st->base.box.depth;
      st->upload.box.y = st->upload.box.z = 0;
      st->upload.box.d = 1;
      break;
   default:
      break;
   }

   nblocksx = util_format_get_nblocksx(texture->format, st->base.box.width);
   nblocksy = util_format_get_nblocksy(texture->format, st->base.box.height);

   st->base.stride = nblocksx * util_format_get_blocksize(texture->format);
   st->base.layer_stride = st->base.stride * nblocksy;

   /* In order to use the TransferFromBuffer command to update the texture
    * content from the buffer, the layer stride for a multi-layer surface
    * needs to be a multiple of 16 bytes.
    */
   if (st->upload.nlayers > 1 && st->base.layer_stride & 15)
      return NULL;

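   /* Pad the total upload size to 16 bytes, matching the offset alignment
    * asserted when the TransferFromBuffer commands are issued at unmap
    * time.
    */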
   upload_size = st->base.layer_stride * st->base.box.depth;
   upload_size = align(upload_size, 16);

#ifdef DEBUG
   if (util_format_is_compressed(texture->format)) {
      struct svga_texture *tex = svga_texture(texture);
      unsigned blockw, blockh, bytesPerBlock;

      svga_format_size(tex->key.format, &blockw, &blockh, &bytesPerBlock);

      /* the dest box must start on a block boundary */
      assert((st->base.box.x % blockw) == 0);
      assert((st->base.box.y % blockh) == 0);
   }
#endif

   /* If the upload size exceeds the default buffer size, the
    * upload buffer manager code will try to allocate a new buffer
    * with the new buffer size.
    */
   u_upload_alloc(svga->tex_upload, 0, upload_size, 16,
                  &offset, &tex_buffer, &tex_map);

   if (!tex_map) {
      return NULL;
   }

   st->upload.buf = tex_buffer;
   st->upload.map = tex_map;
   st->upload.offset = offset;

   return tex_map;
}


/**
 * Unmap an upload map transfer request
 */
void
svga_texture_transfer_unmap_upload(struct svga_context *svga,
                                   struct svga_transfer *st)
{
   struct svga_winsys_surface *srcsurf;
   struct svga_winsys_surface *dstsurf;
   struct pipe_resource *texture = st->base.resource;
   struct svga_texture *tex = svga_texture(texture);
   enum pipe_error ret;
   unsigned subResource;
   unsigned numMipLevels;
   unsigned i, layer;
   unsigned offset = st->upload.offset;

   assert(svga->tex_upload);
   assert(st->upload.buf);

   /* unmap the texture upload buffer */
   u_upload_unmap(svga->tex_upload);

   srcsurf = svga_buffer_handle(svga, st->upload.buf);
   dstsurf = svga_texture(texture)->handle;
   assert(dstsurf);

   numMipLevels = texture->last_level + 1;

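   /* Issue one TransferFromBuffer per layer: the flat subresource index
    * advances by numMipLevels from one layer to the next, while the source
    * offset advances by one layer stride.
    */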
   for (i = 0, layer = st->slice; i < st->upload.nlayers; i++, layer++) {
      subResource = layer * numMipLevels + st->base.level;

      /* send a TransferFromBuffer command to update the host texture surface */
      assert((offset & 15) == 0);

      ret = SVGA3D_vgpu10_TransferFromBuffer(svga->swc, srcsurf,
                                             offset,
                                             st->base.stride,
                                             st->base.layer_stride,
                                             dstsurf, subResource,
                                             &st->upload.box);
      if (ret != PIPE_OK) {
         svga_context_flush(svga, NULL);
         ret = SVGA3D_vgpu10_TransferFromBuffer(svga->swc, srcsurf,
                                                offset,
                                                st->base.stride,
                                                st->base.layer_stride,
                                                dstsurf, subResource,
                                                &st->upload.box);
         assert(ret == PIPE_OK);
      }
      offset += st->base.layer_stride;

      /* Set the rendered-to flag */
      svga_set_texture_rendered_to(tex, layer, st->base.level);
   }

   pipe_resource_reference(&st->upload.buf, NULL);
}