/**********************************************************
 * Copyright 2008-2009 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "svga3d_reg.h"
#include "svga3d_surfacedefs.h"

#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "os/os_thread.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_resource.h"
#include "util/u_upload_mgr.h"

#include "svga_cmd.h"
#include "svga_format.h"
#include "svga_screen.h"
#include "svga_context.h"
#include "svga_resource_texture.h"
#include "svga_resource_buffer.h"
#include "svga_sampler_view.h"
#include "svga_winsys.h"
#include "svga_debug.h"


static void
svga_transfer_dma_band(struct svga_context *svga,
                       struct svga_transfer *st,
                       SVGA3dTransferType transfer,
                       unsigned x, unsigned y, unsigned z,
                       unsigned w, unsigned h, unsigned d,
                       unsigned srcx, unsigned srcy, unsigned srcz,
                       SVGA3dSurfaceDMAFlags flags)
{
   struct svga_texture *texture = svga_texture(st->base.resource);
   SVGA3dCopyBox box;
   enum pipe_error ret;

   assert(!st->use_direct_map);

   box.x = x;
   box.y = y;
   box.z = z;
   box.w = w;
   box.h = h;
   box.d = d;
   box.srcx = srcx;
   box.srcy = srcy;
   box.srcz = srcz;

   SVGA_DBG(DEBUG_DMA, "dma %s sid %p, face %u, (%u, %u, %u) - "
            "(%u, %u, %u), %ubpp\n",
            transfer == SVGA3D_WRITE_HOST_VRAM ? "to" : "from",
            texture->handle,
            st->slice,
            x,
            y,
            z,
            x + w,
            y + h,
            z + 1,
            util_format_get_blocksize(texture->b.b.format) * 8 /
            (util_format_get_blockwidth(texture->b.b.format)
             * util_format_get_blockheight(texture->b.b.format)));

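   /* The command may fail if the command buffer is full; flush the
    * context to make room and issue the command once more.
    */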
   ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
      assert(ret == PIPE_OK);
   }
}


static void
svga_transfer_dma(struct svga_context *svga,
                  struct svga_transfer *st,
                  SVGA3dTransferType transfer,
                  SVGA3dSurfaceDMAFlags flags)
{
   struct svga_texture *texture = svga_texture(st->base.resource);
   struct svga_screen *screen = svga_screen(texture->b.b.screen);
   struct svga_winsys_screen *sws = screen->sws;
   struct pipe_fence_handle *fence = NULL;

   assert(!st->use_direct_map);

   if (transfer == SVGA3D_READ_HOST_VRAM) {
      SVGA_DBG(DEBUG_PERF, "%s: readback transfer\n", __FUNCTION__);
   }

   /* Ensure any pending operations on host surfaces are queued on the command
    * buffer first.
    */
   svga_surfaces_flush(svga);

   if (!st->swbuf) {
      /* Do the DMA transfer in a single go */
      svga_transfer_dma_band(svga, st, transfer,
                             st->base.box.x, st->base.box.y, st->base.box.z,
                             st->base.box.width, st->base.box.height, st->base.box.depth,
                             0, 0, 0,
                             flags);

      if (transfer == SVGA3D_READ_HOST_VRAM) {
         svga_context_flush(svga, &fence);
         sws->fence_finish(sws, fence, 0);
         sws->fence_reference(sws, &fence, NULL);
      }
   }
   else {
      int y, h, srcy;
      unsigned blockheight =
         util_format_get_blockheight(st->base.resource->format);

      h = st->hw_nblocksy * blockheight;
      srcy = 0;

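      /* The hardware buffer is smaller than the transfer, so split the
       * transfer into bands of at most hw_nblocksy block rows and move
       * each band through the hardware buffer in turn.
       */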
      for (y = 0; y < st->base.box.height; y += h) {
         unsigned offset, length;
         void *hw, *sw;

         if (y + h > st->base.box.height)
            h = st->base.box.height - y;

         /* Transfer band must be aligned to pixel block boundaries */
         assert(y % blockheight == 0);
         assert(h % blockheight == 0);

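         /* stride is in bytes per block row, so convert the band origin
          * and height from pixel rows to block rows when computing the
          * byte offset and length.
          */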
         offset = y * st->base.stride / blockheight;
         length = h * st->base.stride / blockheight;

         sw = (uint8_t *) st->swbuf + offset;

         if (transfer == SVGA3D_WRITE_HOST_VRAM) {
            unsigned usage = PIPE_TRANSFER_WRITE;

            /* Wait for the previous DMAs to complete */
            /* TODO: keep one DMA (at half the size) in the background */
            if (y) {
               svga_context_flush(svga, NULL);
               usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
            }

            hw = sws->buffer_map(sws, st->hwbuf, usage);
            assert(hw);
            if (hw) {
               memcpy(hw, sw, length);
               sws->buffer_unmap(sws, st->hwbuf);
            }
         }

         svga_transfer_dma_band(svga, st, transfer,
                                st->base.box.x, y, st->base.box.z,
                                st->base.box.width, h, st->base.box.depth,
                                0, srcy, 0, flags);

         /*
          * Prevent the texture contents from being discarded on the next
          * band upload.
          */
         flags.discard = FALSE;

         if (transfer == SVGA3D_READ_HOST_VRAM) {
            svga_context_flush(svga, &fence);
            sws->fence_finish(sws, fence, 0);

            hw = sws->buffer_map(sws, st->hwbuf, PIPE_TRANSFER_READ);
            assert(hw);
            if (hw) {
               memcpy(sw, hw, length);
               sws->buffer_unmap(sws, st->hwbuf);
            }
         }
      }
   }
}



static boolean
svga_texture_get_handle(struct pipe_screen *screen,
                        struct pipe_resource *texture,
                        struct winsys_handle *whandle)
{
   struct svga_winsys_screen *sws = svga_winsys_screen(texture->screen);
   unsigned stride;

   assert(svga_texture(texture)->key.cachable == 0);
   svga_texture(texture)->key.cachable = 0;

   stride = util_format_get_nblocksx(texture->format, texture->width0) *
            util_format_get_blocksize(texture->format);

   return sws->surface_get_handle(sws, svga_texture(texture)->handle,
                                  stride, whandle);
}


static void
svga_texture_destroy(struct pipe_screen *screen,
                     struct pipe_resource *pt)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_texture *tex = svga_texture(pt);

   ss->texture_timestamp++;

   svga_sampler_view_reference(&tex->cached_view, NULL);

   /*
   DBG("%s deleting %p\n", __FUNCTION__, (void *) tex);
   */
   SVGA_DBG(DEBUG_DMA, "unref sid %p (texture)\n", tex->handle);
   svga_screen_surface_destroy(ss, &tex->key, &tex->handle);

   ss->hud.total_resource_bytes -= tex->size;

   FREE(tex->defined);
   FREE(tex->rendered_to);
   FREE(tex->dirty);
   FREE(tex);

   assert(ss->hud.num_resources > 0);
   if (ss->hud.num_resources > 0)
      ss->hud.num_resources--;
}


/**
 * Determine if the resource was rendered to
 */
static inline boolean
was_tex_rendered_to(struct pipe_resource *resource,
                    const struct pipe_transfer *transfer)
{
   unsigned face;

   if (resource->target == PIPE_TEXTURE_CUBE) {
      assert(transfer->box.depth == 1);
      face = transfer->box.z;
   }
   else {
      face = 0;
   }

   return svga_was_texture_rendered_to(svga_texture(resource),
                                       face, transfer->level);
}


/**
 * Determine if we need to read back a texture image before mapping it.
 */
static inline boolean
need_tex_readback(struct pipe_transfer *transfer)
{
   if (transfer->usage & PIPE_TRANSFER_READ)
      return TRUE;

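   /* A write that doesn't discard the whole resource must preserve the
    * texels outside the written region, so a readback is still needed
    * if the texture was previously rendered to.
    */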
   if ((transfer->usage & PIPE_TRANSFER_WRITE) &&
       ((transfer->usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) == 0)) {
      return was_tex_rendered_to(transfer->resource, transfer);
   }

   return FALSE;
}


static enum pipe_error
readback_image_vgpu9(struct svga_context *svga,
                     struct svga_winsys_surface *surf,
                     unsigned slice,
                     unsigned level)
{
   enum pipe_error ret;

   ret = SVGA3D_ReadbackGBImage(svga->swc, surf, slice, level);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_ReadbackGBImage(svga->swc, surf, slice, level);
   }
   return ret;
}


static enum pipe_error
readback_image_vgpu10(struct svga_context *svga,
                      struct svga_winsys_surface *surf,
                      unsigned slice,
                      unsigned level,
                      unsigned numMipLevels)
{
   enum pipe_error ret;
   unsigned subResource;

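   /* VGPU10 addresses images with a flat subresource index: all mip
    * levels of slice 0 come first, then those of slice 1, and so on.
    */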
   subResource = slice * numMipLevels + level;
   ret = SVGA3D_vgpu10_ReadbackSubResource(svga->swc, surf, subResource);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_ReadbackSubResource(svga->swc, surf, subResource);
   }
   return ret;
}


/**
 * Use DMA for the transfer request
 */
static void *
svga_texture_transfer_map_dma(struct svga_context *svga,
                              struct svga_transfer *st)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   struct pipe_resource *texture = st->base.resource;
   unsigned nblocksx, nblocksy;
   unsigned d;
   unsigned usage = st->base.usage;

   /* we'll put the data into a tightly packed buffer */
   nblocksx = util_format_get_nblocksx(texture->format, st->base.box.width);
   nblocksy = util_format_get_nblocksy(texture->format, st->base.box.height);
   d = st->base.box.depth;

   st->base.stride = nblocksx * util_format_get_blocksize(texture->format);
   st->base.layer_stride = st->base.stride * nblocksy;
   st->hw_nblocksy = nblocksy;

   st->hwbuf = svga_winsys_buffer_create(svga, 1, 0,
                                         st->hw_nblocksy * st->base.stride * d);

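   /* If a buffer covering the whole transfer can't be allocated, keep
    * halving the band height until an allocation succeeds; the transfer
    * will then be performed in multiple bands.
    */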
   while (!st->hwbuf && (st->hw_nblocksy /= 2)) {
      st->hwbuf =
         svga_winsys_buffer_create(svga, 1, 0,
                                   st->hw_nblocksy * st->base.stride * d);
   }

   if (!st->hwbuf)
      return NULL;

   if (st->hw_nblocksy < nblocksy) {
      /* We couldn't allocate a hardware buffer big enough for the transfer,
       * so allocate regular malloc memory instead
       */
      if (0) {
         debug_printf("%s: failed to allocate %u KB of DMA, "
                      "splitting into %u x %u KB DMA transfers\n",
                      __FUNCTION__,
                      (nblocksy * st->base.stride + 1023) / 1024,
                      (nblocksy + st->hw_nblocksy - 1) / st->hw_nblocksy,
                      (st->hw_nblocksy * st->base.stride + 1023) / 1024);
      }

      st->swbuf = MALLOC(nblocksy * st->base.stride * d);
      if (!st->swbuf) {
         sws->buffer_destroy(sws, st->hwbuf);
         return NULL;
      }
   }

   if (usage & PIPE_TRANSFER_READ) {
      SVGA3dSurfaceDMAFlags flags;
      memset(&flags, 0, sizeof flags);
      svga_transfer_dma(svga, st, SVGA3D_READ_HOST_VRAM, flags);
   }

   if (st->swbuf) {
      return st->swbuf;
   }
   else {
      return sws->buffer_map(sws, st->hwbuf, usage);
   }
}


/**
 * Use direct map for the transfer request
 */
static void *
svga_texture_transfer_map_direct(struct svga_context *svga,
                                 struct svga_transfer *st)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   struct pipe_transfer *transfer = &st->base;
   struct pipe_resource *texture = transfer->resource;
   struct svga_texture *tex = svga_texture(texture);
   struct svga_winsys_surface *surf = tex->handle;
   unsigned level = st->base.level;
   unsigned w, h, nblocksx, nblocksy;
   unsigned usage = st->base.usage;

   if (need_tex_readback(transfer)) {
      enum pipe_error ret;

      svga_surfaces_flush(svga);

      if (svga_have_vgpu10(svga)) {
         ret = readback_image_vgpu10(svga, surf, st->slice, level,
                                     tex->b.b.last_level + 1);
      } else {
         ret = readback_image_vgpu9(svga, surf, st->slice, level);
      }

      svga->hud.num_readbacks++;
      SVGA_STATS_COUNT_INC(sws, SVGA_STATS_COUNT_TEXREADBACK);

      assert(ret == PIPE_OK);
      (void) ret;

      svga_context_flush(svga, NULL);
      /*
       * Note: if PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE were specified
       * we could potentially clear the flag for all faces/layers/mips.
       */
      svga_clear_texture_rendered_to(tex, st->slice, level);
   }
   else {
      assert(usage & PIPE_TRANSFER_WRITE);
      if ((usage & PIPE_TRANSFER_UNSYNCHRONIZED) == 0) {
         if (svga_is_texture_dirty(tex, st->slice, level)) {
            /*
             * Do a surface flush if the subresource has been modified
             * in this command buffer.
             */
            svga_surfaces_flush(svga);
            if (!sws->surface_is_flushed(sws, surf)) {
               svga->hud.surface_write_flushes++;
               SVGA_STATS_COUNT_INC(sws, SVGA_STATS_COUNT_SURFACEWRITEFLUSH);
               svga_context_flush(svga, NULL);
            }
         }
      }
   }

   /* we'll directly access the guest-backed surface */
   w = u_minify(texture->width0, level);
   h = u_minify(texture->height0, level);
   nblocksx = util_format_get_nblocksx(texture->format, w);
   nblocksy = util_format_get_nblocksy(texture->format, h);
   st->hw_nblocksy = nblocksy;
   st->base.stride = nblocksx * util_format_get_blocksize(texture->format);
   st->base.layer_stride = st->base.stride * nblocksy;

   /*
    * Begin mapping code
    */
   {
      SVGA3dSize baseLevelSize;
      uint8_t *map;
      boolean retry;
      unsigned offset, mip_width, mip_height;

      map = svga->swc->surface_map(svga->swc, surf, usage, &retry);
      if (map == NULL && retry) {
         /*
          * At this point, svga_surfaces_flush() should already have been
          * called in svga_texture_get_transfer().
          */
         svga->hud.surface_write_flushes++;
         svga_context_flush(svga, NULL);
         map = svga->swc->surface_map(svga->swc, surf, usage, &retry);
      }

      /*
       * Make sure we return NULL if the map fails
       */
      if (!map) {
         return NULL;
      }

      /*
       * Compute the offset to the specific texture slice in the buffer.
       */
      baseLevelSize.width = tex->b.b.width0;
      baseLevelSize.height = tex->b.b.height0;
      baseLevelSize.depth = tex->b.b.depth0;
      if ((tex->b.b.target == PIPE_TEXTURE_1D_ARRAY) ||
          (tex->b.b.target == PIPE_TEXTURE_2D_ARRAY)) {
         st->base.layer_stride =
            svga3dsurface_get_image_offset(tex->key.format, baseLevelSize,
                                           tex->b.b.last_level + 1, 1, 0);
      }

      offset = svga3dsurface_get_image_offset(tex->key.format, baseLevelSize,
                                              tex->b.b.last_level + 1, /* numMips */
                                              st->slice, level);
      if (level > 0) {
         assert(offset > 0);
      }

      mip_width = u_minify(tex->b.b.width0, level);
      mip_height = u_minify(tex->b.b.height0, level);

      offset += svga3dsurface_get_pixel_offset(tex->key.format,
                                               mip_width, mip_height,
                                               st->base.box.x,
                                               st->base.box.y,
                                               st->base.box.z);
      return (void *) (map + offset);
   }
}


/**
 * Request a transfer map to the texture resource
 */
static void *
svga_texture_transfer_map(struct pipe_context *pipe,
                          struct pipe_resource *texture,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_winsys_screen *sws = svga_screen(pipe->screen)->sws;
   struct svga_texture *tex = svga_texture(texture);
   struct svga_transfer *st;
   struct svga_winsys_surface *surf = tex->handle;
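   /* Without a GB DMA path, guest-backed surfaces must be mapped directly. */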
   boolean use_direct_map = svga_have_gb_objects(svga) &&
                            !svga_have_gb_dma(svga);
   void *map = NULL;
   int64_t begin = svga_get_time(svga);

   SVGA_STATS_TIME_PUSH(sws, SVGA_STATS_TIME_TEXTRANSFERMAP);

   if (!surf)
      goto done;

   /* We can't map texture storage directly unless we have GB objects */
   if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
      if (svga_have_gb_objects(svga))
         use_direct_map = TRUE;
      else
         goto done;
   }

   st = CALLOC_STRUCT(svga_transfer);
   if (!st)
      goto done;

   st->base.level = level;
   st->base.usage = usage;
   st->base.box = *box;

   switch (tex->b.b.target) {
   case PIPE_TEXTURE_CUBE:
      st->slice = st->base.box.z;
      st->base.box.z = 0;   /* so we don't apply double offsets below */
      break;
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_1D_ARRAY:
      st->slice = st->base.box.z;
      st->base.box.z = 0;   /* so we don't apply double offsets below */

      /* Force direct map for transferring multiple slices */
      if (st->base.box.depth > 1)
         use_direct_map = svga_have_gb_objects(svga);

      break;
   default:
      st->slice = 0;
      break;
   }

   st->use_direct_map = use_direct_map;
   pipe_resource_reference(&st->base.resource, texture);

   /* If this is the first time mapping to the surface in this
    * command buffer, clear the dirty masks of this surface.
    */
   if (sws->surface_is_flushed(sws, surf)) {
      svga_clear_texture_dirty(tex);
   }

   if (!use_direct_map) {
      /* upload to the DMA buffer */
      map = svga_texture_transfer_map_dma(svga, st);
   }
   else {
      boolean can_use_upload = tex->can_use_upload &&
                               !(st->base.usage & PIPE_TRANSFER_READ);
      boolean was_rendered_to = was_tex_rendered_to(texture, &st->base);

      /* If the texture was already rendered to and the upload buffer
       * is supported, use the upload buffer to avoid having to read
       * back the texture content; otherwise, first try to map the GB
       * surface directly and, if that would block, fall back to the
       * upload buffer.
       */
      if (was_rendered_to && can_use_upload) {
         map = svga_texture_transfer_map_upload(svga, st);
      }
      else {
         unsigned orig_usage = st->base.usage;

         /* First try directly mapping to the GB surface */
         if (can_use_upload)
            st->base.usage |= PIPE_TRANSFER_DONTBLOCK;
         map = svga_texture_transfer_map_direct(svga, st);
         st->base.usage = orig_usage;

         if (!map && can_use_upload) {
            /* If the direct map with DONTBLOCK fails, then try the
             * texture upload buffer instead.
             */
            map = svga_texture_transfer_map_upload(svga, st);
         }
      }

      /* If the upload fails, then try the direct map again without
       * forcing DONTBLOCK.
       */
      if (!map) {
         map = svga_texture_transfer_map_direct(svga, st);
      }
   }

   if (!map) {
      FREE(st);
   }
   else {
      *ptransfer = &st->base;
      svga->hud.num_textures_mapped++;
      if (usage & PIPE_TRANSFER_WRITE) {
         /* record texture upload for HUD */
         svga->hud.num_bytes_uploaded +=
            st->base.layer_stride * st->base.box.depth;

         /* mark this texture level as dirty */
         svga_set_texture_dirty(tex, st->slice, level);
      }
   }

done:
   svga->hud.map_buffer_time += (svga_get_time(svga) - begin);
   SVGA_STATS_TIME_POP(sws);
   (void) sws;

   return map;
}

/**
 * Unmap a GB texture surface.
 */
static void
svga_texture_surface_unmap(struct svga_context *svga,
                           struct pipe_transfer *transfer)
{
   struct svga_winsys_surface *surf = svga_texture(transfer->resource)->handle;
   struct svga_winsys_context *swc = svga->swc;
   boolean rebind;

   assert(surf);

   swc->surface_unmap(swc, surf, &rebind);
   if (rebind) {
      enum pipe_error ret;
      ret = SVGA3D_BindGBSurface(swc, surf);
      if (ret != PIPE_OK) {
         /* flush and retry */
         svga_context_flush(svga, NULL);
         ret = SVGA3D_BindGBSurface(swc, surf);
         assert(ret == PIPE_OK);
      }
   }
}


static enum pipe_error
update_image_vgpu9(struct svga_context *svga,
                   struct svga_winsys_surface *surf,
                   const SVGA3dBox *box,
                   unsigned slice,
                   unsigned level)
{
   enum pipe_error ret;

   ret = SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level);
   }
   return ret;
}


static enum pipe_error
update_image_vgpu10(struct svga_context *svga,
                    struct svga_winsys_surface *surf,
                    const SVGA3dBox *box,
                    unsigned slice,
                    unsigned level,
                    unsigned numMipLevels)
{
   enum pipe_error ret;
   unsigned subResource;

   subResource = slice * numMipLevels + level;
   ret = SVGA3D_vgpu10_UpdateSubResource(svga->swc, surf, box, subResource);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_UpdateSubResource(svga->swc, surf, box, subResource);
   }
   return ret;
}


/**
 * Unmap a DMA transfer request
 */
static void
svga_texture_transfer_unmap_dma(struct svga_context *svga,
                                struct svga_transfer *st)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;

   if (st->hwbuf)
      sws->buffer_unmap(sws, st->hwbuf);

   if (st->base.usage & PIPE_TRANSFER_WRITE) {
      /* Use DMA to transfer texture data */
      SVGA3dSurfaceDMAFlags flags;

      memset(&flags, 0, sizeof flags);
      if (st->base.usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
         flags.discard = TRUE;
      }
      if (st->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
         flags.unsynchronized = TRUE;
      }

      svga_transfer_dma(svga, st, SVGA3D_WRITE_HOST_VRAM, flags);
   }

   FREE(st->swbuf);
   sws->buffer_destroy(sws, st->hwbuf);
}


/**
 * Unmap a direct map transfer request
 */
static void
svga_texture_transfer_unmap_direct(struct svga_context *svga,
                                   struct svga_transfer *st)
{
   struct pipe_transfer *transfer = &st->base;
   struct svga_texture *tex = svga_texture(transfer->resource);

   svga_texture_surface_unmap(svga, transfer);

   /* Now send an update command to update the content in the backend. */
   if (st->base.usage & PIPE_TRANSFER_WRITE) {
      struct svga_winsys_surface *surf = tex->handle;
      SVGA3dBox box;
      enum pipe_error ret;
      unsigned nlayers = 1;

      assert(svga_have_gb_objects(svga));

      /* update the affected region */
      box.x = transfer->box.x;
      box.y = transfer->box.y;
      box.w = transfer->box.width;
      box.h = transfer->box.height;
      box.d = transfer->box.depth;

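      /* The update command addresses a single image, so for cube and
       * array textures the layer is selected by the subresource index
       * rather than box.z; normalize the box accordingly and remember
       * how many layers need updating.
       */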
      switch (tex->b.b.target) {
      case PIPE_TEXTURE_CUBE:
         box.z = 0;
         break;
      case PIPE_TEXTURE_2D_ARRAY:
         nlayers = box.d;
         box.z = 0;
         box.d = 1;
         break;
      case PIPE_TEXTURE_1D_ARRAY:
         nlayers = box.d;
         box.y = box.z = 0;
         box.d = 1;
         break;
      default:
         box.z = transfer->box.z;
         break;
      }

      if (0)
         debug_printf("%s %d, %d, %d %d x %d x %d\n",
                      __FUNCTION__,
                      box.x, box.y, box.z,
                      box.w, box.h, box.d);

      if (svga_have_vgpu10(svga)) {
         unsigned i;
         for (i = 0; i < nlayers; i++) {
            ret = update_image_vgpu10(svga, surf, &box,
                                      st->slice + i, transfer->level,
                                      tex->b.b.last_level + 1);
            assert(ret == PIPE_OK);
         }
      } else {
         assert(nlayers == 1);
         ret = update_image_vgpu9(svga, surf, &box, st->slice, transfer->level);
         assert(ret == PIPE_OK);
      }
      (void) ret;
   }
}

static void
svga_texture_transfer_unmap(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_transfer *st = svga_transfer(transfer);
   struct svga_texture *tex = svga_texture(transfer->resource);

   SVGA_STATS_TIME_PUSH(sws, SVGA_STATS_TIME_TEXTRANSFERUNMAP);

   if (!st->use_direct_map) {
      svga_texture_transfer_unmap_dma(svga, st);
   }
   else if (st->upload.buf) {
      svga_texture_transfer_unmap_upload(svga, st);
   }
   else {
      svga_texture_transfer_unmap_direct(svga, st);
   }

   if (st->base.usage & PIPE_TRANSFER_WRITE) {
      svga->hud.num_resource_updates++;

      /* Mark the texture level as dirty */
      ss->texture_timestamp++;
      svga_age_texture_view(tex, transfer->level);
      if (transfer->resource->target == PIPE_TEXTURE_CUBE)
         svga_define_texture_level(tex, st->slice, transfer->level);
      else
         svga_define_texture_level(tex, 0, transfer->level);
   }

   pipe_resource_reference(&st->base.resource, NULL);
   FREE(st);
   SVGA_STATS_TIME_POP(sws);
   (void) sws;
}


/**
 * Does the format store depth values?
 */
static inline boolean
format_has_depth(enum pipe_format format)
{
   const struct util_format_description *desc = util_format_description(format);
   return util_format_has_depth(desc);
}


struct u_resource_vtbl svga_texture_vtbl =
{
   svga_texture_get_handle,           /* get_handle */
   svga_texture_destroy,              /* resource_destroy */
   svga_texture_transfer_map,         /* transfer_map */
   u_default_transfer_flush_region,   /* transfer_flush_region */
   svga_texture_transfer_unmap,       /* transfer_unmap */
};


struct pipe_resource *
svga_texture_create(struct pipe_screen *screen,
                    const struct pipe_resource *template)
{
   struct svga_screen *svgascreen = svga_screen(screen);
   struct svga_texture *tex;
   unsigned bindings = template->bind;

   SVGA_STATS_TIME_PUSH(svgascreen->sws,
                        SVGA_STATS_TIME_CREATETEXTURE);

   assert(template->last_level < SVGA_MAX_TEXTURE_LEVELS);
   if (template->last_level >= SVGA_MAX_TEXTURE_LEVELS) {
      goto fail_notex;
   }

   tex = CALLOC_STRUCT(svga_texture);
   if (!tex) {
      goto fail_notex;
   }

   tex->defined = CALLOC(template->depth0 * template->array_size,
                         sizeof(tex->defined[0]));
   if (!tex->defined) {
      FREE(tex);
      goto fail_notex;
   }

   tex->rendered_to = CALLOC(template->depth0 * template->array_size,
                             sizeof(tex->rendered_to[0]));
   if (!tex->rendered_to) {
      goto fail;
   }

   tex->dirty = CALLOC(template->depth0 * template->array_size,
                       sizeof(tex->dirty[0]));
   if (!tex->dirty) {
      goto fail;
   }

   tex->b.b = *template;
   tex->b.vtbl = &svga_texture_vtbl;
   pipe_reference_init(&tex->b.b.reference, 1);
   tex->b.b.screen = screen;

   tex->key.flags = 0;
   tex->key.size.width = template->width0;
   tex->key.size.height = template->height0;
   tex->key.size.depth = template->depth0;
   tex->key.arraySize = 1;
   tex->key.numFaces = 1;

   /* A single-sample texture can be treated as a non-multisampled texture */
   tex->key.sampleCount = template->nr_samples > 1 ? template->nr_samples : 0;

   if (template->nr_samples > 1) {
      tex->key.flags |= SVGA3D_SURFACE_MASKABLE_ANTIALIAS;
   }

   if (svgascreen->sws->have_vgpu10) {
      switch (template->target) {
      case PIPE_TEXTURE_1D:
         tex->key.flags |= SVGA3D_SURFACE_1D;
         break;
      case PIPE_TEXTURE_1D_ARRAY:
         tex->key.flags |= SVGA3D_SURFACE_1D;
         /* fall-through */
      case PIPE_TEXTURE_2D_ARRAY:
         tex->key.flags |= SVGA3D_SURFACE_ARRAY;
         tex->key.arraySize = template->array_size;
         break;
      case PIPE_TEXTURE_3D:
         tex->key.flags |= SVGA3D_SURFACE_VOLUME;
         break;
      case PIPE_TEXTURE_CUBE:
         tex->key.flags |= (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_ARRAY);
         tex->key.numFaces = 6;
         break;
      default:
         break;
      }
   }
   else {
      switch (template->target) {
      case PIPE_TEXTURE_3D:
         tex->key.flags |= SVGA3D_SURFACE_VOLUME;
         break;
      case PIPE_TEXTURE_CUBE:
         tex->key.flags |= SVGA3D_SURFACE_CUBEMAP;
         tex->key.numFaces = 6;
         break;
      default:
         break;
      }
   }

   tex->key.cachable = 1;

   if ((bindings & (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL)) &&
       !(bindings & PIPE_BIND_SAMPLER_VIEW)) {
      /* Also check if the format can be sampled from */
      if (screen->is_format_supported(screen, template->format,
                                      template->target,
                                      template->nr_samples,
                                      PIPE_BIND_SAMPLER_VIEW)) {
         bindings |= PIPE_BIND_SAMPLER_VIEW;
      }
   }

   if (bindings & PIPE_BIND_SAMPLER_VIEW) {
      tex->key.flags |= SVGA3D_SURFACE_HINT_TEXTURE;
      tex->key.flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE;

      if (!(bindings & PIPE_BIND_RENDER_TARGET)) {
         /* Also check if the format is color renderable */
         if (screen->is_format_supported(screen, template->format,
                                         template->target,
                                         template->nr_samples,
                                         PIPE_BIND_RENDER_TARGET)) {
            bindings |= PIPE_BIND_RENDER_TARGET;
         }
      }

      if (!(bindings & PIPE_BIND_DEPTH_STENCIL)) {
         /* Also check if the format is depth/stencil renderable */
         if (screen->is_format_supported(screen, template->format,
                                         template->target,
                                         template->nr_samples,
                                         PIPE_BIND_DEPTH_STENCIL)) {
            bindings |= PIPE_BIND_DEPTH_STENCIL;
         }
      }
   }

   if (bindings & PIPE_BIND_DISPLAY_TARGET) {
      tex->key.cachable = 0;
   }

   if (bindings & PIPE_BIND_SHARED) {
      tex->key.cachable = 0;
   }

   if (bindings & (PIPE_BIND_SCANOUT | PIPE_BIND_CURSOR)) {
      tex->key.scanout = 1;
      tex->key.cachable = 0;
   }

   /*
    * Note: Previously we never passed the
    * SVGA3D_SURFACE_HINT_RENDERTARGET hint. Mesa cannot
    * know beforehand whether a texture will be used as a rendertarget or not
    * and it always requests PIPE_BIND_RENDER_TARGET, therefore
    * passing the SVGA3D_SURFACE_HINT_RENDERTARGET here defeats its purpose.
    *
    * However, this was changed since other state trackers
    * (XA for example) use it accurately and certain device versions
    * rely on it in certain situations to render correctly.
    */
   if ((bindings & PIPE_BIND_RENDER_TARGET) &&
       !util_format_is_s3tc(template->format)) {
      tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
      tex->key.flags |= SVGA3D_SURFACE_BIND_RENDER_TARGET;
   }

   if (bindings & PIPE_BIND_DEPTH_STENCIL) {
      tex->key.flags |= SVGA3D_SURFACE_HINT_DEPTHSTENCIL;
      tex->key.flags |= SVGA3D_SURFACE_BIND_DEPTH_STENCIL;
   }

   tex->key.numMipLevels = template->last_level + 1;

   tex->key.format = svga_translate_format(svgascreen, template->format,
                                           bindings);
   if (tex->key.format == SVGA3D_FORMAT_INVALID) {
      goto fail;
   }

   /* The actual allocation is done with a typeless format. Typeless
    * formats can be reinterpreted as other formats. For example,
    * SVGA3D_R8G8B8A8_UNORM_TYPELESS can be interpreted as
    * SVGA3D_R8G8B8A8_UNORM_SRGB or SVGA3D_R8G8B8A8_UNORM.
    * Do not use typeless formats for SHARED, DISPLAY_TARGET or SCANOUT
    * buffers.
    */
   if (svgascreen->sws->have_vgpu10
       && ((bindings & (PIPE_BIND_SHARED |
                        PIPE_BIND_DISPLAY_TARGET |
                        PIPE_BIND_SCANOUT)) == 0)) {
      SVGA3dSurfaceFormat typeless = svga_typeless_format(tex->key.format);
      if (0) {
         debug_printf("Convert resource type %s -> %s (bind 0x%x)\n",
                      svga_format_name(tex->key.format),
                      svga_format_name(typeless),
                      bindings);
      }

      if (svga_format_is_uncompressed_snorm(tex->key.format)) {
         /* We can't normally render to snorm surfaces, but once we
          * substitute a typeless format, we can if the rendertarget view
          * is unorm. This can happen with GL_ARB_copy_image.
          */
         tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
         tex->key.flags |= SVGA3D_SURFACE_BIND_RENDER_TARGET;
      }

      tex->key.format = typeless;
   }

   SVGA_DBG(DEBUG_DMA, "surface_create for texture\n");
   tex->handle = svga_screen_surface_create(svgascreen, bindings,
                                            tex->b.b.usage, &tex->key);
   if (!tex->handle) {
      goto fail;
   }

   SVGA_DBG(DEBUG_DMA, "  --> got sid %p (texture)\n", tex->handle);

   debug_reference(&tex->b.b.reference,
                   (debug_reference_descriptor)debug_describe_resource, 0);

   tex->size = util_resource_size(template);

   /* Determine if texture upload buffer can be used to upload this texture */
   tex->can_use_upload = svga_texture_transfer_map_can_upload(svgascreen,
                                                              &tex->b.b);

   svgascreen->hud.total_resource_bytes += tex->size;
   svgascreen->hud.num_resources++;

   SVGA_STATS_TIME_POP(svgascreen->sws);

   return &tex->b.b;

fail:
   if (tex->dirty)
      FREE(tex->dirty);
   if (tex->rendered_to)
      FREE(tex->rendered_to);
   if (tex->defined)
      FREE(tex->defined);
   FREE(tex);
fail_notex:
   SVGA_STATS_TIME_POP(svgascreen->sws);
   return NULL;
}


struct pipe_resource *
svga_texture_from_handle(struct pipe_screen *screen,
                         const struct pipe_resource *template,
                         struct winsys_handle *whandle)
{
   struct svga_winsys_screen *sws = svga_winsys_screen(screen);
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_surface *srf;
   struct svga_texture *tex;
   enum SVGA3dSurfaceFormat format = 0;

   assert(screen);

   /* Only single-level 2D or rect textures with depth 1 are supported */
   if ((template->target != PIPE_TEXTURE_2D &&
        template->target != PIPE_TEXTURE_RECT) ||
       template->last_level != 0 ||
       template->depth0 != 1) {
      return NULL;
   }

   srf = sws->surface_from_handle(sws, whandle, &format);

   if (!srf)
      return NULL;

   if (svga_translate_format(svga_screen(screen), template->format,
                             template->bind) != format) {
      unsigned f1 = svga_translate_format(svga_screen(screen),
                                          template->format, template->bind);
      unsigned f2 = format;

      /* It's okay for XRGB and ARGB or depth with/without stencil to get
       * mixed up.
       */
      if (f1 == SVGA3D_B8G8R8A8_UNORM)
         f1 = SVGA3D_A8R8G8B8;
      if (f1 == SVGA3D_B8G8R8X8_UNORM)
         f1 = SVGA3D_X8R8G8B8;

      if ( !( (f1 == f2) ||
              (f1 == SVGA3D_X8R8G8B8 && f2 == SVGA3D_A8R8G8B8) ||
              (f1 == SVGA3D_X8R8G8B8 && f2 == SVGA3D_B8G8R8X8_UNORM) ||
              (f1 == SVGA3D_A8R8G8B8 && f2 == SVGA3D_X8R8G8B8) ||
              (f1 == SVGA3D_A8R8G8B8 && f2 == SVGA3D_B8G8R8A8_UNORM) ||
              (f1 == SVGA3D_Z_D24X8 && f2 == SVGA3D_Z_D24S8) ||
              (f1 == SVGA3D_Z_DF24 && f2 == SVGA3D_Z_D24S8_INT) ) ) {
         debug_printf("%s wrong format %s != %s\n", __FUNCTION__,
                      svga_format_name(f1), svga_format_name(f2));
         return NULL;
      }
   }

   tex = CALLOC_STRUCT(svga_texture);
   if (!tex)
      return NULL;

   tex->defined = CALLOC(template->depth0 * template->array_size,
                         sizeof(tex->defined[0]));
   if (!tex->defined) {
      FREE(tex);
      return NULL;
   }

   tex->b.b = *template;
   tex->b.vtbl = &svga_texture_vtbl;
   pipe_reference_init(&tex->b.b.reference, 1);
   tex->b.b.screen = screen;

   SVGA_DBG(DEBUG_DMA, "wrap surface sid %p\n", srf);

   tex->key.cachable = 0;
   tex->key.format = format;
   tex->handle = srf;

   tex->rendered_to = CALLOC(1, sizeof(tex->rendered_to[0]));
   if (!tex->rendered_to)
      goto fail;

   tex->dirty = CALLOC(1, sizeof(tex->dirty[0]));
   if (!tex->dirty)
      goto fail;

   tex->imported = TRUE;

   ss->hud.num_resources++;

   return &tex->b.b;

fail:
   if (tex->defined)
      FREE(tex->defined);
   if (tex->rendered_to)
      FREE(tex->rendered_to);
   if (tex->dirty)
      FREE(tex->dirty);
   FREE(tex);
   return NULL;
}

boolean
svga_texture_generate_mipmap(struct pipe_context *pipe,
                             struct pipe_resource *pt,
                             enum pipe_format format,
                             unsigned base_level,
                             unsigned last_level,
                             unsigned first_layer,
                             unsigned last_layer)
{
   struct pipe_sampler_view templ, *psv;
   struct svga_pipe_sampler_view *sv;
   struct svga_context *svga = svga_context(pipe);
   struct svga_texture *tex = svga_texture(pt);
   enum pipe_error ret;

   assert(svga_have_vgpu10(svga));

   /* Only support 2D texture for now */
   if (pt->target != PIPE_TEXTURE_2D)
      return FALSE;

   /* Fall back to the mipmap generation utility for those formats that
    * do not support hw generate mipmap
    */
   if (!svga_format_support_gen_mips(format))
      return FALSE;

   /* Make sure the texture surface was created with
    * SVGA3D_SURFACE_BIND_RENDER_TARGET
    */
   if (!tex->handle || !(tex->key.flags & SVGA3D_SURFACE_BIND_RENDER_TARGET))
      return FALSE;

   templ.format = format;
   templ.u.tex.first_layer = first_layer;
   templ.u.tex.last_layer = last_layer;
   templ.u.tex.first_level = base_level;
   templ.u.tex.last_level = last_level;

   psv = pipe->create_sampler_view(pipe, pt, &templ);
   if (psv == NULL)
      return FALSE;

   sv = svga_pipe_sampler_view(psv);
   ret = svga_validate_pipe_sampler_view(svga, sv);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = svga_validate_pipe_sampler_view(svga, sv);
      assert(ret == PIPE_OK);
   }

   ret = SVGA3D_vgpu10_GenMips(svga->swc, sv->id, tex->handle);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_GenMips(svga->swc, sv->id, tex->handle);
   }
   pipe_sampler_view_reference(&psv, NULL);

   svga->hud.num_generate_mipmap++;

   return TRUE;
}


/* texture upload buffer default size in bytes */
#define TEX_UPLOAD_DEFAULT_SIZE (1024 * 1024)

/**
 * Create a texture upload buffer
 */
boolean
svga_texture_transfer_map_upload_create(struct svga_context *svga)
{
   svga->tex_upload = u_upload_create(&svga->pipe, TEX_UPLOAD_DEFAULT_SIZE,
                                      0, PIPE_USAGE_STAGING);
   return svga->tex_upload != NULL;
}


/**
 * Destroy the texture upload buffer
 */
void
svga_texture_transfer_map_upload_destroy(struct svga_context *svga)
{
   u_upload_destroy(svga->tex_upload);
}


/**
 * Returns true if this transfer map request can use the upload buffer.
 */
boolean
svga_texture_transfer_map_can_upload(const struct svga_screen *svgascreen,
                                     const struct pipe_resource *texture)
{
   if (svgascreen->sws->have_transfer_from_buffer_cmd == FALSE)
      return FALSE;

   /* The TransferFromBuffer command is not well supported with multisample
    * surfaces.
    */
   if (texture->nr_samples > 1)
      return FALSE;

   if (util_format_is_compressed(texture->format)) {
      /* XXX Need to take a closer look to see why texture upload
       * with compressed 3D textures fails
       */
      if (texture->target == PIPE_TEXTURE_3D)
         return FALSE;
   }
   else if (texture->format == PIPE_FORMAT_R9G9B9E5_FLOAT) {
      return FALSE;
   }

   return TRUE;
}


/**
 * Use upload buffer for the transfer map request.
 */
void *
svga_texture_transfer_map_upload(struct svga_context *svga,
                                 struct svga_transfer *st)
{
   struct pipe_resource *texture = st->base.resource;
   struct pipe_resource *tex_buffer = NULL;
   void *tex_map;
   unsigned nblocksx, nblocksy;
   unsigned offset;
   unsigned upload_size;

   assert(svga->tex_upload);

   st->upload.box.x = st->base.box.x;
   st->upload.box.y = st->base.box.y;
   st->upload.box.z = st->base.box.z;
   st->upload.box.w = st->base.box.width;
   st->upload.box.h = st->base.box.height;
   st->upload.box.d = st->base.box.depth;
   st->upload.nlayers = 1;

   switch (texture->target) {
   case PIPE_TEXTURE_CUBE:
      st->upload.box.z = 0;
      break;
   case PIPE_TEXTURE_2D_ARRAY:
      st->upload.nlayers = st->base.box.depth;
      st->upload.box.z = 0;
      st->upload.box.d = 1;
      break;
   case PIPE_TEXTURE_1D_ARRAY:
      st->upload.nlayers = st->base.box.depth;
      st->upload.box.y = st->upload.box.z = 0;
      st->upload.box.d = 1;
      break;
   default:
      break;
   }

   nblocksx = util_format_get_nblocksx(texture->format, st->base.box.width);
   nblocksy = util_format_get_nblocksy(texture->format, st->base.box.height);

   st->base.stride = nblocksx * util_format_get_blocksize(texture->format);
   st->base.layer_stride = st->base.stride * nblocksy;

   /* In order to use the TransferFromBuffer command to update the
    * texture content from the buffer, the layer stride for a multi-layer
    * surface needs to be a multiple of 16 bytes.
    */
   if (st->upload.nlayers > 1 && st->base.layer_stride & 15)
      return NULL;

   upload_size = st->base.layer_stride * st->base.box.depth;
   upload_size = align(upload_size, 16);

#ifdef DEBUG
   if (util_format_is_compressed(texture->format)) {
      struct svga_texture *tex = svga_texture(texture);
      unsigned blockw, blockh, bytesPerBlock;

      svga_format_size(tex->key.format, &blockw, &blockh, &bytesPerBlock);

      /* dest box must start on block boundary */
      assert((st->base.box.x % blockw) == 0);
      assert((st->base.box.y % blockh) == 0);
   }
#endif

   /* If the upload size exceeds the default buffer size, the
    * upload buffer manager code will try to allocate a new buffer
    * with the new buffer size.
    */
   u_upload_alloc(svga->tex_upload, 0, upload_size, 16,
                  &offset, &tex_buffer, &tex_map);

   if (!tex_map) {
      return NULL;
   }

   st->upload.buf = tex_buffer;
   st->upload.map = tex_map;
   st->upload.offset = offset;

   return tex_map;
}


/**
 * Unmap upload map transfer request
 */
void
svga_texture_transfer_unmap_upload(struct svga_context *svga,
                                   struct svga_transfer *st)
{
   struct svga_winsys_surface *srcsurf;
   struct svga_winsys_surface *dstsurf;
   struct pipe_resource *texture = st->base.resource;
   struct svga_texture *tex = svga_texture(texture);
   enum pipe_error ret;
   unsigned subResource;
   unsigned numMipLevels;
   unsigned i, layer;
   unsigned offset = st->upload.offset;

   assert(svga->tex_upload);
   assert(st->upload.buf);

   /* unmap the texture upload buffer */
   u_upload_unmap(svga->tex_upload);

   srcsurf = svga_buffer_handle(svga, st->upload.buf);
   dstsurf = svga_texture(texture)->handle;
   assert(dstsurf);

   numMipLevels = texture->last_level + 1;

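   /* Issue one TransferFromBuffer command per layer; consecutive layers
    * are layer_stride bytes apart in the upload buffer and map to
    * consecutive subresources of the destination texture.
    */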
   for (i = 0, layer = st->slice; i < st->upload.nlayers; i++, layer++) {
      subResource = layer * numMipLevels + st->base.level;

      /* send a transferFromBuffer command to update the host texture surface */
      assert((offset & 15) == 0);

      ret = SVGA3D_vgpu10_TransferFromBuffer(svga->swc, srcsurf,
                                             offset,
                                             st->base.stride,
                                             st->base.layer_stride,
                                             dstsurf, subResource,
                                             &st->upload.box);
      if (ret != PIPE_OK) {
         svga_context_flush(svga, NULL);
         ret = SVGA3D_vgpu10_TransferFromBuffer(svga->swc, srcsurf,
                                                offset,
                                                st->base.stride,
                                                st->base.layer_stride,
                                                dstsurf, subResource,
                                                &st->upload.box);
         assert(ret == PIPE_OK);
      }
      offset += st->base.layer_stride;

      /* Set rendered-to flag */
      svga_set_texture_rendered_to(tex, layer, st->base.level);
   }

   pipe_resource_reference(&st->upload.buf, NULL);
}