src/gallium/drivers/svga/svga_resource_texture.c
/**********************************************************
 * Copyright 2008-2009 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "svga3d_reg.h"
#include "svga3d_surfacedefs.h"

#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "os/os_thread.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_resource.h"
#include "util/u_upload_mgr.h"

#include "svga_cmd.h"
#include "svga_format.h"
#include "svga_screen.h"
#include "svga_context.h"
#include "svga_resource_texture.h"
#include "svga_resource_buffer.h"
#include "svga_sampler_view.h"
#include "svga_winsys.h"
#include "svga_debug.h"

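/**
 * Emit a SurfaceDMA command that copies a single band of the transfer
 * between the host surface and the transfer's staging buffer.  If the
 * command buffer is full, flush and retry once.
 */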
static void
svga_transfer_dma_band(struct svga_context *svga,
                       struct svga_transfer *st,
                       SVGA3dTransferType transfer,
                       unsigned x, unsigned y, unsigned z,
                       unsigned w, unsigned h, unsigned d,
                       unsigned srcx, unsigned srcy, unsigned srcz,
                       SVGA3dSurfaceDMAFlags flags)
{
   struct svga_texture *texture = svga_texture(st->base.resource);
   SVGA3dCopyBox box;
   enum pipe_error ret;

   assert(!st->use_direct_map);

   box.x = x;
   box.y = y;
   box.z = z;
   box.w = w;
   box.h = h;
   box.d = d;
   box.srcx = srcx;
   box.srcy = srcy;
   box.srcz = srcz;

   SVGA_DBG(DEBUG_DMA, "dma %s sid %p, face %u, (%u, %u, %u) - "
            "(%u, %u, %u), %ubpp\n",
            transfer == SVGA3D_WRITE_HOST_VRAM ? "to" : "from",
            texture->handle,
            st->slice,
            x,
            y,
            z,
            x + w,
            y + h,
            z + 1,
            util_format_get_blocksize(texture->b.b.format) * 8 /
            (util_format_get_blockwidth(texture->b.b.format)
             * util_format_get_blockheight(texture->b.b.format)));

   ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
      assert(ret == PIPE_OK);
   }
}

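/**
 * Perform the DMA transfer for the whole transfer region.  If the staging
 * data lives in a malloc'd buffer (st->swbuf), the transfer is split into
 * bands of st->hw_nblocksy block rows, each copied through the hardware
 * buffer in turn.
 */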
static void
svga_transfer_dma(struct svga_context *svga,
                  struct svga_transfer *st,
                  SVGA3dTransferType transfer,
                  SVGA3dSurfaceDMAFlags flags)
{
   struct svga_texture *texture = svga_texture(st->base.resource);
   struct svga_screen *screen = svga_screen(texture->b.b.screen);
   struct svga_winsys_screen *sws = screen->sws;
   struct pipe_fence_handle *fence = NULL;

   assert(!st->use_direct_map);

   if (transfer == SVGA3D_READ_HOST_VRAM) {
      SVGA_DBG(DEBUG_PERF, "%s: readback transfer\n", __FUNCTION__);
   }

   /* Ensure any pending operations on host surfaces are queued on the command
    * buffer first.
    */
   svga_surfaces_flush(svga);

   if (!st->swbuf) {
      /* Do the DMA transfer in a single go */
      svga_transfer_dma_band(svga, st, transfer,
                             st->base.box.x, st->base.box.y, st->base.box.z,
                             st->base.box.width, st->base.box.height,
                             st->base.box.depth,
                             0, 0, 0,
                             flags);

      if (transfer == SVGA3D_READ_HOST_VRAM) {
         svga_context_flush(svga, &fence);
         sws->fence_finish(sws, fence, PIPE_TIMEOUT_INFINITE, 0);
         sws->fence_reference(sws, &fence, NULL);
      }
   }
   else {
      int y, h, srcy;
      unsigned blockheight =
         util_format_get_blockheight(st->base.resource->format);

      h = st->hw_nblocksy * blockheight;
      srcy = 0;

      for (y = 0; y < st->base.box.height; y += h) {
         unsigned offset, length;
         void *hw, *sw;

         if (y + h > st->base.box.height)
            h = st->base.box.height - y;

         /* Transfer band must be aligned to pixel block boundaries */
         assert(y % blockheight == 0);
         assert(h % blockheight == 0);

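         /* y and h are in pixels while the stride is per block row, so
          * scale by the format's block height to get buffer offsets.
          */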
         offset = y * st->base.stride / blockheight;
         length = h * st->base.stride / blockheight;

         sw = (uint8_t *) st->swbuf + offset;

         if (transfer == SVGA3D_WRITE_HOST_VRAM) {
            unsigned usage = PIPE_TRANSFER_WRITE;

            /* Wait for the previous DMAs to complete */
            /* TODO: keep one DMA (at half the size) in the background */
            if (y) {
               svga_context_flush(svga, NULL);
               usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
            }

            hw = sws->buffer_map(sws, st->hwbuf, usage);
            assert(hw);
            if (hw) {
               memcpy(hw, sw, length);
               sws->buffer_unmap(sws, st->hwbuf);
            }
         }

         svga_transfer_dma_band(svga, st, transfer,
                                st->base.box.x, y, st->base.box.z,
                                st->base.box.width, h, st->base.box.depth,
                                0, srcy, 0, flags);

         /*
          * Prevent the texture contents from being discarded on the next
          * band upload.
          */
         flags.discard = FALSE;

         if (transfer == SVGA3D_READ_HOST_VRAM) {
            svga_context_flush(svga, &fence);
            sws->fence_finish(sws, fence, PIPE_TIMEOUT_INFINITE, 0);

            hw = sws->buffer_map(sws, st->hwbuf, PIPE_TRANSFER_READ);
            assert(hw);
            if (hw) {
               memcpy(sw, hw, length);
               sws->buffer_unmap(sws, st->hwbuf);
            }
         }
      }
   }
}


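/**
 * Return a winsys handle for the texture's backing surface so it can be
 * shared outside the driver.  The surface must no longer be cachable
 * once its handle has been given out.
 */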
static boolean
svga_texture_get_handle(struct pipe_screen *screen,
                        struct pipe_resource *texture,
                        struct winsys_handle *whandle)
{
   struct svga_winsys_screen *sws = svga_winsys_screen(texture->screen);
   unsigned stride;

   assert(svga_texture(texture)->key.cachable == 0);
   svga_texture(texture)->key.cachable = 0;

   stride = util_format_get_nblocksx(texture->format, texture->width0) *
            util_format_get_blocksize(texture->format);

   return sws->surface_get_handle(sws, svga_texture(texture)->handle,
                                  stride, whandle);
}


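/**
 * Destroy a texture: release the cached sampler view, the backing
 * surface handle(s) and the per-slice bookkeeping arrays, and update
 * the HUD resource accounting.
 */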
static void
svga_texture_destroy(struct pipe_screen *screen,
                     struct pipe_resource *pt)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_texture *tex = svga_texture(pt);

   ss->texture_timestamp++;

   svga_sampler_view_reference(&tex->cached_view, NULL);

   /*
     DBG("%s deleting %p\n", __FUNCTION__, (void *) tex);
   */
   SVGA_DBG(DEBUG_DMA, "unref sid %p (texture)\n", tex->handle);
   svga_screen_surface_destroy(ss, &tex->key, &tex->handle);

   /* Destroy the backed surface handle if it exists */
   if (tex->backed_handle)
      svga_screen_surface_destroy(ss, &tex->backed_key, &tex->backed_handle);

   ss->hud.total_resource_bytes -= tex->size;

   FREE(tex->defined);
   FREE(tex->rendered_to);
   FREE(tex->dirty);
   FREE(tex);

   assert(ss->hud.num_resources > 0);
   if (ss->hud.num_resources > 0)
      ss->hud.num_resources--;
}


/**
 * Determine if the resource was rendered to
 */
static inline boolean
was_tex_rendered_to(struct pipe_resource *resource,
                    const struct pipe_transfer *transfer)
{
   unsigned layer_face;

   switch (resource->target) {
   case PIPE_TEXTURE_CUBE:
      assert(transfer->box.depth == 1);
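      /* fall-through: a cube face is addressed like an array layer */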
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      layer_face = transfer->box.z;
      break;
   default:
      layer_face = 0;
   }

   return svga_was_texture_rendered_to(svga_texture(resource),
                                       layer_face, transfer->level);
}


/**
 * Determine if we need to read back a texture image before mapping it.
 */
static inline boolean
need_tex_readback(struct pipe_transfer *transfer)
{
   if (transfer->usage & PIPE_TRANSFER_READ)
      return TRUE;

   if ((transfer->usage & PIPE_TRANSFER_WRITE) &&
       ((transfer->usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) == 0)) {
      return was_tex_rendered_to(transfer->resource, transfer);
   }

   return FALSE;
}


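/**
 * Issue a ReadbackGBImage command to bring the host copy of one image
 * (slice/level) into the guest-backed surface.  Flush and retry once if
 * the command buffer is full.
 */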
static enum pipe_error
readback_image_vgpu9(struct svga_context *svga,
                     struct svga_winsys_surface *surf,
                     unsigned slice,
                     unsigned level)
{
   enum pipe_error ret;

   ret = SVGA3D_ReadbackGBImage(svga->swc, surf, slice, level);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_ReadbackGBImage(svga->swc, surf, slice, level);
   }
   return ret;
}


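/**
 * VGPU10 variant of the readback: images are addressed by a linearized
 * subresource index (slice * numMipLevels + level).
 */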
static enum pipe_error
readback_image_vgpu10(struct svga_context *svga,
                      struct svga_winsys_surface *surf,
                      unsigned slice,
                      unsigned level,
                      unsigned numMipLevels)
{
   enum pipe_error ret;
   unsigned subResource;

   subResource = slice * numMipLevels + level;
   ret = SVGA3D_vgpu10_ReadbackSubResource(svga->swc, surf, subResource);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_ReadbackSubResource(svga->swc, surf, subResource);
   }
   return ret;
}


/**
 * Use DMA for the transfer request
 */
static void *
svga_texture_transfer_map_dma(struct svga_context *svga,
                              struct svga_transfer *st)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   struct pipe_resource *texture = st->base.resource;
   unsigned nblocksx, nblocksy;
   unsigned d;
   unsigned usage = st->base.usage;

   /* we'll put the data into a tightly packed buffer */
   nblocksx = util_format_get_nblocksx(texture->format, st->base.box.width);
   nblocksy = util_format_get_nblocksy(texture->format, st->base.box.height);
   d = st->base.box.depth;

   st->base.stride = nblocksx * util_format_get_blocksize(texture->format);
   st->base.layer_stride = st->base.stride * nblocksy;
   st->hw_nblocksy = nblocksy;

   st->hwbuf = svga_winsys_buffer_create(svga, 1, 0,
                                         st->hw_nblocksy * st->base.stride * d);

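   /* If the allocation failed, keep halving the band height until a
    * hardware buffer small enough to be allocated fits; the transfer
    * will then be DMA'd in multiple bands of that height.
    */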
   while (!st->hwbuf && (st->hw_nblocksy /= 2)) {
      st->hwbuf =
         svga_winsys_buffer_create(svga, 1, 0,
                                   st->hw_nblocksy * st->base.stride * d);
   }

   if (!st->hwbuf)
      return NULL;

   if (st->hw_nblocksy < nblocksy) {
      /* We couldn't allocate a hardware buffer big enough for the transfer,
       * so allocate regular malloc memory instead
       */
      if (0) {
         debug_printf("%s: failed to allocate %u KB of DMA, "
                      "splitting into %u x %u KB DMA transfers\n",
                      __FUNCTION__,
                      (nblocksy * st->base.stride + 1023) / 1024,
                      (nblocksy + st->hw_nblocksy - 1) / st->hw_nblocksy,
                      (st->hw_nblocksy * st->base.stride + 1023) / 1024);
      }

      st->swbuf = MALLOC(nblocksy * st->base.stride * d);
      if (!st->swbuf) {
         sws->buffer_destroy(sws, st->hwbuf);
         return NULL;
      }
   }

   if (usage & PIPE_TRANSFER_READ) {
      SVGA3dSurfaceDMAFlags flags;
      memset(&flags, 0, sizeof flags);
      svga_transfer_dma(svga, st, SVGA3D_READ_HOST_VRAM, flags);
   }

   if (st->swbuf) {
      return st->swbuf;
   }
   else {
      return sws->buffer_map(sws, st->hwbuf, usage);
   }
}


/**
 * Use direct map for the transfer request
 */
static void *
svga_texture_transfer_map_direct(struct svga_context *svga,
                                 struct svga_transfer *st)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;
   struct pipe_transfer *transfer = &st->base;
   struct pipe_resource *texture = transfer->resource;
   struct svga_texture *tex = svga_texture(texture);
   struct svga_winsys_surface *surf = tex->handle;
   unsigned level = st->base.level;
   unsigned w, h, nblocksx, nblocksy, i;
   unsigned usage = st->base.usage;

   if (need_tex_readback(transfer)) {
      enum pipe_error ret;

      svga_surfaces_flush(svga);

      for (i = 0; i < st->base.box.depth; i++) {
         if (svga_have_vgpu10(svga)) {
            ret = readback_image_vgpu10(svga, surf, st->slice + i, level,
                                        tex->b.b.last_level + 1);
         } else {
            ret = readback_image_vgpu9(svga, surf, st->slice + i, level);
         }
      }
      svga->hud.num_readbacks++;
      SVGA_STATS_COUNT_INC(sws, SVGA_STATS_COUNT_TEXREADBACK);

      assert(ret == PIPE_OK);
      (void) ret;

      svga_context_flush(svga, NULL);
      /*
       * Note: if PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE were specified
       * we could potentially clear the flag for all faces/layers/mips.
       */
      svga_clear_texture_rendered_to(tex, st->slice, level);
   }
   else {
      assert(usage & PIPE_TRANSFER_WRITE);
      if ((usage & PIPE_TRANSFER_UNSYNCHRONIZED) == 0) {
         if (svga_is_texture_dirty(tex, st->slice, level)) {
            /*
             * Do a surface flush if the subresource has been modified
             * in this command buffer.
             */
            svga_surfaces_flush(svga);
            if (!sws->surface_is_flushed(sws, surf)) {
               svga->hud.surface_write_flushes++;
               SVGA_STATS_COUNT_INC(sws, SVGA_STATS_COUNT_SURFACEWRITEFLUSH);
               svga_context_flush(svga, NULL);
            }
         }
      }
   }

   /* we'll directly access the guest-backed surface */
   w = u_minify(texture->width0, level);
   h = u_minify(texture->height0, level);
   nblocksx = util_format_get_nblocksx(texture->format, w);
   nblocksy = util_format_get_nblocksy(texture->format, h);
   st->hw_nblocksy = nblocksy;
   st->base.stride = nblocksx * util_format_get_blocksize(texture->format);
   st->base.layer_stride = st->base.stride * nblocksy;

   /*
    * Begin mapping code
    */
   {
      SVGA3dSize baseLevelSize;
      uint8_t *map;
      boolean retry;
      unsigned offset, mip_width, mip_height;

      map = svga->swc->surface_map(svga->swc, surf, usage, &retry);
      if (map == NULL && retry) {
         /*
          * At this point, svga_surfaces_flush() should already have been
          * called in svga_texture_get_transfer().
          */
         svga->hud.surface_write_flushes++;
         svga_context_flush(svga, NULL);
         map = svga->swc->surface_map(svga->swc, surf, usage, &retry);
      }

      /*
       * Make sure we return NULL if the map fails
       */
      if (!map) {
         return NULL;
      }

      /*
       * Compute the offset to the specific texture slice in the buffer.
       */
      baseLevelSize.width = tex->b.b.width0;
      baseLevelSize.height = tex->b.b.height0;
      baseLevelSize.depth = tex->b.b.depth0;

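      /* For array textures, the layer stride is the distance between
       * successive layers; the offset of image (layer 1, mip 0) gives
       * exactly that distance.
       */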
      if ((tex->b.b.target == PIPE_TEXTURE_1D_ARRAY) ||
          (tex->b.b.target == PIPE_TEXTURE_2D_ARRAY) ||
          (tex->b.b.target == PIPE_TEXTURE_CUBE_ARRAY)) {
         st->base.layer_stride =
            svga3dsurface_get_image_offset(tex->key.format, baseLevelSize,
                                           tex->b.b.last_level + 1, 1, 0);
      }

      offset = svga3dsurface_get_image_offset(tex->key.format, baseLevelSize,
                                              tex->b.b.last_level + 1, /* numMips */
                                              st->slice, level);
      if (level > 0) {
         assert(offset > 0);
      }

      mip_width = u_minify(tex->b.b.width0, level);
      mip_height = u_minify(tex->b.b.height0, level);

      offset += svga3dsurface_get_pixel_offset(tex->key.format,
                                               mip_width, mip_height,
                                               st->base.box.x,
                                               st->base.box.y,
                                               st->base.box.z);

      return (void *) (map + offset);
   }
}


/**
 * Request a transfer map to the texture resource
 */
static void *
svga_texture_transfer_map(struct pipe_context *pipe,
                          struct pipe_resource *texture,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_winsys_screen *sws = svga_screen(pipe->screen)->sws;
   struct svga_texture *tex = svga_texture(texture);
   struct svga_transfer *st;
   struct svga_winsys_surface *surf = tex->handle;
   boolean use_direct_map = svga_have_gb_objects(svga) &&
                            !svga_have_gb_dma(svga);
   void *map = NULL;
   int64_t begin = svga_get_time(svga);

   SVGA_STATS_TIME_PUSH(sws, SVGA_STATS_TIME_TEXTRANSFERMAP);

   if (!surf)
      goto done;

   /* We can't map texture storage directly unless we have GB objects */
   if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
      if (svga_have_gb_objects(svga))
         use_direct_map = TRUE;
      else
         goto done;
   }

   st = CALLOC_STRUCT(svga_transfer);
   if (!st)
      goto done;

   st->base.level = level;
   st->base.usage = usage;
   st->base.box = *box;

   switch (tex->b.b.target) {
   case PIPE_TEXTURE_CUBE:
      st->slice = st->base.box.z;
      st->base.box.z = 0;   /* so we don't apply double offsets below */
      break;
   case PIPE_TEXTURE_1D_ARRAY:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      st->slice = st->base.box.z;
      st->base.box.z = 0;   /* so we don't apply double offsets below */

      /* Force direct map for transferring multiple slices */
      if (st->base.box.depth > 1)
         use_direct_map = svga_have_gb_objects(svga);

      break;
   default:
      st->slice = 0;
      break;
   }

   /* Force direct map for multisample surfaces */
   if (texture->nr_samples > 1) {
      assert(svga_have_gb_objects(svga));
      assert(sws->have_sm4_1);
      use_direct_map = TRUE;
   }

   st->use_direct_map = use_direct_map;
   pipe_resource_reference(&st->base.resource, texture);

   /* If this is the first time mapping to the surface in this
    * command buffer, clear the dirty masks of this surface.
    */
   if (sws->surface_is_flushed(sws, surf)) {
      svga_clear_texture_dirty(tex);
   }

   if (!use_direct_map) {
      /* upload to the DMA buffer */
      map = svga_texture_transfer_map_dma(svga, st);
   }
   else {
      boolean can_use_upload = tex->can_use_upload &&
                               !(st->base.usage & PIPE_TRANSFER_READ);
      boolean was_rendered_to = was_tex_rendered_to(texture, &st->base);

      /* If the texture was already rendered to and the upload buffer
       * is supported, then we will use the upload buffer to
       * avoid the need to read back the texture content; otherwise,
       * we'll first try to map directly to the GB surface and, if that
       * is blocked, then try the upload buffer.
       */
      if (was_rendered_to && can_use_upload) {
         map = svga_texture_transfer_map_upload(svga, st);
      }
      else {
         unsigned orig_usage = st->base.usage;

         /* First try directly mapping to the GB surface */
         if (can_use_upload)
            st->base.usage |= PIPE_TRANSFER_DONTBLOCK;
         map = svga_texture_transfer_map_direct(svga, st);
         st->base.usage = orig_usage;

         if (!map && can_use_upload) {
            /* if direct map with DONTBLOCK fails, then try upload to the
             * texture upload buffer.
             */
            map = svga_texture_transfer_map_upload(svga, st);
         }
      }

      /* If upload fails, then try direct map again without forcing it
       * to DONTBLOCK.
       */
      if (!map) {
         map = svga_texture_transfer_map_direct(svga, st);
      }
   }

   if (!map) {
      FREE(st);
   }
   else {
      *ptransfer = &st->base;
      svga->hud.num_textures_mapped++;
      if (usage & PIPE_TRANSFER_WRITE) {
         /* record texture upload for HUD */
         svga->hud.num_bytes_uploaded +=
            st->base.layer_stride * st->base.box.depth;

         /* mark this texture level as dirty */
         svga_set_texture_dirty(tex, st->slice, level);
      }
   }

done:
   svga->hud.map_buffer_time += (svga_get_time(svga) - begin);
   SVGA_STATS_TIME_POP(sws);
   (void) sws;

   return map;
}


/**
 * Unmap a GB texture surface.
 */
static void
svga_texture_surface_unmap(struct svga_context *svga,
                           struct pipe_transfer *transfer)
{
   struct svga_winsys_surface *surf = svga_texture(transfer->resource)->handle;
   struct svga_winsys_context *swc = svga->swc;
   boolean rebind;

   assert(surf);

   swc->surface_unmap(swc, surf, &rebind);
   if (rebind) {
      enum pipe_error ret;
      ret = SVGA3D_BindGBSurface(swc, surf);
      if (ret != PIPE_OK) {
         /* flush and retry */
         svga_context_flush(svga, NULL);
         ret = SVGA3D_BindGBSurface(swc, surf);
         assert(ret == PIPE_OK);
      }
   }
}


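/**
 * Issue an UpdateGBImage command for one image (slice/level) of a
 * guest-backed surface, flushing and retrying once if the command
 * buffer is full.
 */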
static enum pipe_error
update_image_vgpu9(struct svga_context *svga,
                   struct svga_winsys_surface *surf,
                   const SVGA3dBox *box,
                   unsigned slice,
                   unsigned level)
{
   enum pipe_error ret;

   ret = SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level);
   }
   return ret;
}


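/**
 * VGPU10 variant of the image update: the image is addressed by a
 * linearized subresource index (slice * numMipLevels + level).
 */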
static enum pipe_error
update_image_vgpu10(struct svga_context *svga,
                    struct svga_winsys_surface *surf,
                    const SVGA3dBox *box,
                    unsigned slice,
                    unsigned level,
                    unsigned numMipLevels)
{
   enum pipe_error ret;
   unsigned subResource;

   subResource = slice * numMipLevels + level;

   ret = SVGA3D_vgpu10_UpdateSubResource(svga->swc, surf, box, subResource);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_UpdateSubResource(svga->swc, surf, box, subResource);
   }
   return ret;
}


/**
 * Unmap a DMA transfer request
 */
static void
svga_texture_transfer_unmap_dma(struct svga_context *svga,
                                struct svga_transfer *st)
{
   struct svga_winsys_screen *sws = svga_screen(svga->pipe.screen)->sws;

   if (st->hwbuf)
      sws->buffer_unmap(sws, st->hwbuf);

   if (st->base.usage & PIPE_TRANSFER_WRITE) {
      /* Use DMA to transfer texture data */
      SVGA3dSurfaceDMAFlags flags;

      memset(&flags, 0, sizeof flags);
      if (st->base.usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
         flags.discard = TRUE;
      }
      if (st->base.usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
         flags.unsynchronized = TRUE;
      }

      svga_transfer_dma(svga, st, SVGA3D_WRITE_HOST_VRAM, flags);
   }

   FREE(st->swbuf);
   sws->buffer_destroy(sws, st->hwbuf);
}


/**
 * Unmap a direct map transfer request
 */
static void
svga_texture_transfer_unmap_direct(struct svga_context *svga,
                                   struct svga_transfer *st)
{
   struct pipe_transfer *transfer = &st->base;
   struct svga_texture *tex = svga_texture(transfer->resource);

   svga_texture_surface_unmap(svga, transfer);

   /* Now send an update command to update the content in the backend. */
   if (st->base.usage & PIPE_TRANSFER_WRITE) {
      struct svga_winsys_surface *surf = tex->handle;
      SVGA3dBox box;
      enum pipe_error ret;
      unsigned nlayers = 1;

      assert(svga_have_gb_objects(svga));

      /* update the affected region */
      box.x = transfer->box.x;
      box.y = transfer->box.y;
      box.w = transfer->box.width;
      box.h = transfer->box.height;
      box.d = transfer->box.depth;

      switch (tex->b.b.target) {
      case PIPE_TEXTURE_CUBE:
         box.z = 0;
         break;
      case PIPE_TEXTURE_2D_ARRAY:
      case PIPE_TEXTURE_CUBE_ARRAY:
         nlayers = box.d;
         box.z = 0;
         box.d = 1;
         break;
      case PIPE_TEXTURE_1D_ARRAY:
         nlayers = box.d;
         box.y = box.z = 0;
         box.d = 1;
         break;
      default:
         box.z = transfer->box.z;
         break;
      }

      if (0)
         debug_printf("%s %d, %d, %d %d x %d x %d\n",
                      __FUNCTION__,
                      box.x, box.y, box.z,
                      box.w, box.h, box.d);

      if (svga_have_vgpu10(svga)) {
         unsigned i;

         for (i = 0; i < nlayers; i++) {
            ret = update_image_vgpu10(svga, surf, &box,
                                      st->slice + i, transfer->level,
                                      tex->b.b.last_level + 1);
            assert(ret == PIPE_OK);
         }
      } else {
         assert(nlayers == 1);
         ret = update_image_vgpu9(svga, surf, &box, st->slice, transfer->level);
         assert(ret == PIPE_OK);
      }
      (void) ret;
   }
}

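/**
 * Unmap a texture transfer: dispatch to the DMA, upload-buffer or
 * direct-map unmap path, then for written levels age any cached views
 * and mark the level as defined.
 */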
static void
svga_texture_transfer_unmap(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_transfer *st = svga_transfer(transfer);
   struct svga_texture *tex = svga_texture(transfer->resource);

   SVGA_STATS_TIME_PUSH(sws, SVGA_STATS_TIME_TEXTRANSFERUNMAP);

   if (!st->use_direct_map) {
      svga_texture_transfer_unmap_dma(svga, st);
   }
   else if (st->upload.buf) {
      svga_texture_transfer_unmap_upload(svga, st);
   }
   else {
      svga_texture_transfer_unmap_direct(svga, st);
   }

   if (st->base.usage & PIPE_TRANSFER_WRITE) {
      svga->hud.num_resource_updates++;

      /* Mark the texture level as dirty */
      ss->texture_timestamp++;
      svga_age_texture_view(tex, transfer->level);
      if (transfer->resource->target == PIPE_TEXTURE_CUBE)
         svga_define_texture_level(tex, st->slice, transfer->level);
      else
         svga_define_texture_level(tex, 0, transfer->level);
   }

   pipe_resource_reference(&st->base.resource, NULL);
   FREE(st);
   SVGA_STATS_TIME_POP(sws);
   (void) sws;
}


/**
 * Does format store depth values?
 */
static inline boolean
format_has_depth(enum pipe_format format)
{
   const struct util_format_description *desc = util_format_description(format);
   return util_format_has_depth(desc);
}


struct u_resource_vtbl svga_texture_vtbl =
{
   svga_texture_get_handle,           /* get_handle */
   svga_texture_destroy,              /* resource_destroy */
   svga_texture_transfer_map,         /* transfer_map */
   u_default_transfer_flush_region,   /* transfer_flush_region */
   svga_texture_transfer_unmap,       /* transfer_unmap */
};


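/**
 * Create a new texture resource: validate the template, allocate the
 * per-slice bookkeeping arrays, translate the bind flags and format
 * into an SVGA surface key, and create the backing host surface.
 */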
struct pipe_resource *
svga_texture_create(struct pipe_screen *screen,
                    const struct pipe_resource *template)
{
   struct svga_screen *svgascreen = svga_screen(screen);
   struct svga_texture *tex;
   unsigned bindings = template->bind;

   SVGA_STATS_TIME_PUSH(svgascreen->sws,
                        SVGA_STATS_TIME_CREATETEXTURE);

   assert(template->last_level < SVGA_MAX_TEXTURE_LEVELS);
   if (template->last_level >= SVGA_MAX_TEXTURE_LEVELS) {
      goto fail_notex;
   }

   /* Verify the number of mipmap levels isn't impossibly large.  For example,
    * if the base 2D image is 16x16, we can't have 8 mipmap levels.
    * The state tracker should never ask us to create a resource with invalid
    * parameters.
    */
   {
      unsigned max_dim = template->width0;

      switch (template->target) {
      case PIPE_TEXTURE_1D:
      case PIPE_TEXTURE_1D_ARRAY:
         // nothing
         break;
      case PIPE_TEXTURE_2D:
      case PIPE_TEXTURE_CUBE:
      case PIPE_TEXTURE_CUBE_ARRAY:
      case PIPE_TEXTURE_2D_ARRAY:
         max_dim = MAX2(max_dim, template->height0);
         break;
      case PIPE_TEXTURE_3D:
         max_dim = MAX3(max_dim, template->height0, template->depth0);
         break;
      case PIPE_TEXTURE_RECT:
      case PIPE_BUFFER:
         assert(template->last_level == 0);
         /* the assertion below should always pass */
         break;
      default:
         debug_printf("Unexpected texture target type\n");
      }
      assert(1 << template->last_level <= max_dim);
   }

   tex = CALLOC_STRUCT(svga_texture);
   if (!tex) {
      goto fail_notex;
   }

   tex->defined = CALLOC(template->depth0 * template->array_size,
                         sizeof(tex->defined[0]));
   if (!tex->defined) {
      FREE(tex);
      goto fail_notex;
   }

   tex->rendered_to = CALLOC(template->depth0 * template->array_size,
                             sizeof(tex->rendered_to[0]));
   if (!tex->rendered_to) {
      goto fail;
   }

   tex->dirty = CALLOC(template->depth0 * template->array_size,
                       sizeof(tex->dirty[0]));
   if (!tex->dirty) {
      goto fail;
   }

   tex->b.b = *template;
   tex->b.vtbl = &svga_texture_vtbl;
   pipe_reference_init(&tex->b.b.reference, 1);
   tex->b.b.screen = screen;

   tex->key.flags = 0;
   tex->key.size.width = template->width0;
   tex->key.size.height = template->height0;
   tex->key.size.depth = template->depth0;
   tex->key.arraySize = 1;
   tex->key.numFaces = 1;

   /* nr_samples=1 must be treated as a non-multisample texture */
   if (tex->b.b.nr_samples == 1) {
      tex->b.b.nr_samples = 0;
   }
   else if (tex->b.b.nr_samples > 1) {
      assert(svgascreen->sws->have_sm4_1);
      tex->key.flags |= SVGA3D_SURFACE_MULTISAMPLE;
   }

   tex->key.sampleCount = tex->b.b.nr_samples;

   if (svgascreen->sws->have_vgpu10) {
      switch (template->target) {
      case PIPE_TEXTURE_1D:
         tex->key.flags |= SVGA3D_SURFACE_1D;
         break;
      case PIPE_TEXTURE_1D_ARRAY:
         tex->key.flags |= SVGA3D_SURFACE_1D;
         /* fall-through */
      case PIPE_TEXTURE_2D_ARRAY:
         tex->key.flags |= SVGA3D_SURFACE_ARRAY;
         tex->key.arraySize = template->array_size;
         break;
      case PIPE_TEXTURE_3D:
         tex->key.flags |= SVGA3D_SURFACE_VOLUME;
         break;
      case PIPE_TEXTURE_CUBE:
         tex->key.flags |= (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_ARRAY);
         tex->key.numFaces = 6;
         break;
      case PIPE_TEXTURE_CUBE_ARRAY:
         assert(svgascreen->sws->have_sm4_1);
         tex->key.flags |= (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_ARRAY);
         tex->key.numFaces = 1;   // arraySize already includes the 6 faces
         tex->key.arraySize = template->array_size;
         break;
      default:
         break;
      }
   }
   else {
      switch (template->target) {
      case PIPE_TEXTURE_3D:
         tex->key.flags |= SVGA3D_SURFACE_VOLUME;
         break;
      case PIPE_TEXTURE_CUBE:
         tex->key.flags |= SVGA3D_SURFACE_CUBEMAP;
         tex->key.numFaces = 6;
         break;
      default:
         break;
      }
   }

   tex->key.cachable = 1;

   if ((bindings & (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL)) &&
       !(bindings & PIPE_BIND_SAMPLER_VIEW)) {
      /* Also check if the format can be sampled from */
      if (screen->is_format_supported(screen, template->format,
                                      template->target,
                                      template->nr_samples,
                                      template->nr_storage_samples,
                                      PIPE_BIND_SAMPLER_VIEW)) {
         bindings |= PIPE_BIND_SAMPLER_VIEW;
      }
   }

   if (bindings & PIPE_BIND_SAMPLER_VIEW) {
      tex->key.flags |= SVGA3D_SURFACE_HINT_TEXTURE;
      tex->key.flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE;

      if (!(bindings & PIPE_BIND_RENDER_TARGET)) {
         /* Also check if the format is color renderable */
         if (screen->is_format_supported(screen, template->format,
                                         template->target,
                                         template->nr_samples,
                                         template->nr_storage_samples,
                                         PIPE_BIND_RENDER_TARGET)) {
            bindings |= PIPE_BIND_RENDER_TARGET;
         }
      }

      if (!(bindings & PIPE_BIND_DEPTH_STENCIL)) {
         /* Also check if the format is depth/stencil renderable */
         if (screen->is_format_supported(screen, template->format,
                                         template->target,
                                         template->nr_samples,
                                         template->nr_storage_samples,
                                         PIPE_BIND_DEPTH_STENCIL)) {
            bindings |= PIPE_BIND_DEPTH_STENCIL;
         }
      }
   }

   if (bindings & PIPE_BIND_DISPLAY_TARGET) {
      tex->key.cachable = 0;
   }

   if (bindings & PIPE_BIND_SHARED) {
      tex->key.cachable = 0;
   }

   if (bindings & (PIPE_BIND_SCANOUT | PIPE_BIND_CURSOR)) {
      tex->key.scanout = 1;
      tex->key.cachable = 0;
   }

   /*
    * Note: Previously we never passed the
    * SVGA3D_SURFACE_HINT_RENDERTARGET hint.  Mesa cannot
    * know beforehand whether a texture will be used as a rendertarget or not
    * and it always requests PIPE_BIND_RENDER_TARGET, therefore
    * passing the SVGA3D_SURFACE_HINT_RENDERTARGET here defeats its purpose.
    *
    * However, this was changed because other state trackers
    * (XA for example) use it accurately and certain device versions
    * rely on it in certain situations to render correctly.
    */
   if ((bindings & PIPE_BIND_RENDER_TARGET) &&
       !util_format_is_s3tc(template->format)) {
      tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
      tex->key.flags |= SVGA3D_SURFACE_BIND_RENDER_TARGET;
   }

   if (bindings & PIPE_BIND_DEPTH_STENCIL) {
      tex->key.flags |= SVGA3D_SURFACE_HINT_DEPTHSTENCIL;
      tex->key.flags |= SVGA3D_SURFACE_BIND_DEPTH_STENCIL;
   }

   tex->key.numMipLevels = template->last_level + 1;

   tex->key.format = svga_translate_format(svgascreen, template->format,
                                           bindings);
   if (tex->key.format == SVGA3D_FORMAT_INVALID) {
      goto fail;
   }

   /* Use typeless formats for sRGB and depth resources.  Typeless
    * formats can be reinterpreted as other formats.  For example,
    * SVGA3D_R8G8B8A8_UNORM_TYPELESS can be interpreted as
    * SVGA3D_R8G8B8A8_UNORM_SRGB or SVGA3D_R8G8B8A8_UNORM.
    */
   if (svgascreen->sws->have_vgpu10 &&
       (util_format_is_srgb(template->format) ||
        format_has_depth(template->format))) {
      SVGA3dSurfaceFormat typeless = svga_typeless_format(tex->key.format);
      if (0) {
         debug_printf("Convert resource type %s -> %s (bind 0x%x)\n",
                      svga_format_name(tex->key.format),
                      svga_format_name(typeless),
                      bindings);
      }

      if (svga_format_is_uncompressed_snorm(tex->key.format)) {
         /* We can't normally render to snorm surfaces, but once we
          * substitute a typeless format, we can if the rendertarget view
          * is unorm.  This can happen with GL_ARB_copy_image.
          */
         tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
         tex->key.flags |= SVGA3D_SURFACE_BIND_RENDER_TARGET;
      }

      tex->key.format = typeless;
   }

   SVGA_DBG(DEBUG_DMA, "surface_create for texture\n");
   tex->handle = svga_screen_surface_create(svgascreen, bindings,
                                            tex->b.b.usage,
                                            &tex->validated, &tex->key);
   if (!tex->handle) {
      goto fail;
   }

   SVGA_DBG(DEBUG_DMA, " --> got sid %p (texture)\n", tex->handle);

   debug_reference(&tex->b.b.reference,
                   (debug_reference_descriptor)debug_describe_resource, 0);

   tex->size = util_resource_size(template);

   /* Determine if texture upload buffer can be used to upload this texture */
   tex->can_use_upload = svga_texture_transfer_map_can_upload(svgascreen,
                                                              &tex->b.b);

   /* Initialize the backing resource cache */
   tex->backed_handle = NULL;

   svgascreen->hud.total_resource_bytes += tex->size;
   svgascreen->hud.num_resources++;

   SVGA_STATS_TIME_POP(svgascreen->sws);

   return &tex->b.b;

fail:
   if (tex->dirty)
      FREE(tex->dirty);
   if (tex->rendered_to)
      FREE(tex->rendered_to);
   if (tex->defined)
      FREE(tex->defined);
   FREE(tex);
fail_notex:
   SVGA_STATS_TIME_POP(svgascreen->sws);
   return NULL;
}


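/**
 * Wrap an imported winsys surface (e.g. a shared scanout buffer) in a
 * pipe_resource.  Only single-level, depth-1 2D/RECT surfaces are
 * accepted.
 */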
struct pipe_resource *
svga_texture_from_handle(struct pipe_screen *screen,
                         const struct pipe_resource *template,
                         struct winsys_handle *whandle)
{
   struct svga_winsys_screen *sws = svga_winsys_screen(screen);
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_surface *srf;
   struct svga_texture *tex;
   enum SVGA3dSurfaceFormat format = 0;
   assert(screen);

   /* Only single-level 2D and RECT textures of depth 1 are supported */
   if ((template->target != PIPE_TEXTURE_2D &&
        template->target != PIPE_TEXTURE_RECT) ||
       template->last_level != 0 ||
       template->depth0 != 1) {
      return NULL;
   }

   srf = sws->surface_from_handle(sws, whandle, &format);

   if (!srf)
      return NULL;

   if (!svga_format_is_shareable(ss, template->format, format,
                                 template->bind, true))
      goto out_unref;

   tex = CALLOC_STRUCT(svga_texture);
   if (!tex)
      goto out_unref;

   tex->defined = CALLOC(template->depth0 * template->array_size,
                         sizeof(tex->defined[0]));
   if (!tex->defined)
      goto out_no_defined;

   tex->b.b = *template;
   tex->b.vtbl = &svga_texture_vtbl;
   pipe_reference_init(&tex->b.b.reference, 1);
   tex->b.b.screen = screen;

   SVGA_DBG(DEBUG_DMA, "wrap surface sid %p\n", srf);

   tex->key.cachable = 0;
   tex->key.format = format;
   tex->handle = srf;

   tex->rendered_to = CALLOC(1, sizeof(tex->rendered_to[0]));
   if (!tex->rendered_to)
      goto out_no_rendered_to;

   tex->dirty = CALLOC(1, sizeof(tex->dirty[0]));
   if (!tex->dirty)
      goto out_no_dirty;

   tex->imported = TRUE;

   ss->hud.num_resources++;

   return &tex->b.b;

out_no_dirty:
   FREE(tex->rendered_to);
out_no_rendered_to:
   FREE(tex->defined);
out_no_defined:
   FREE(tex);
out_unref:
   sws->surface_reference(sws, &srf, NULL);
   return NULL;
}

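/**
 * Generate mipmap levels on the device with the VGPU10 GenMips command.
 * Returns FALSE for unsupported targets/formats so the caller can fall
 * back to the software mipmap generation path.
 */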
boolean
svga_texture_generate_mipmap(struct pipe_context *pipe,
                             struct pipe_resource *pt,
                             enum pipe_format format,
                             unsigned base_level,
                             unsigned last_level,
                             unsigned first_layer,
                             unsigned last_layer)
{
   struct pipe_sampler_view templ, *psv;
   struct svga_pipe_sampler_view *sv;
   struct svga_context *svga = svga_context(pipe);
   struct svga_texture *tex = svga_texture(pt);
   enum pipe_error ret;

   assert(svga_have_vgpu10(svga));

   /* Only 2D textures are supported for now */
   if (pt->target != PIPE_TEXTURE_2D)
      return FALSE;

   /* Fall back to the mipmap generation utility for formats that do not
    * support hw mipmap generation.
    */
   if (!svga_format_support_gen_mips(format))
      return FALSE;

   /* Make sure the texture surface was created with
    * SVGA3D_SURFACE_BIND_RENDER_TARGET
    */
   if (!tex->handle || !(tex->key.flags & SVGA3D_SURFACE_BIND_RENDER_TARGET))
      return FALSE;

   templ.format = format;
   templ.u.tex.first_layer = first_layer;
   templ.u.tex.last_layer = last_layer;
   templ.u.tex.first_level = base_level;
   templ.u.tex.last_level = last_level;

   psv = pipe->create_sampler_view(pipe, pt, &templ);
   if (psv == NULL)
      return FALSE;

   sv = svga_pipe_sampler_view(psv);
   ret = svga_validate_pipe_sampler_view(svga, sv);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = svga_validate_pipe_sampler_view(svga, sv);
      assert(ret == PIPE_OK);
   }

   ret = SVGA3D_vgpu10_GenMips(svga->swc, sv->id, tex->handle);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_GenMips(svga->swc, sv->id, tex->handle);
   }
   pipe_sampler_view_reference(&psv, NULL);

   svga->hud.num_generate_mipmap++;

   return TRUE;
}


/* texture upload buffer default size in bytes */
#define TEX_UPLOAD_DEFAULT_SIZE (1024 * 1024)

/**
 * Create a texture upload buffer
 */
boolean
svga_texture_transfer_map_upload_create(struct svga_context *svga)
{
   svga->tex_upload = u_upload_create(&svga->pipe, TEX_UPLOAD_DEFAULT_SIZE,
                                      0, PIPE_USAGE_STAGING, 0);
   return svga->tex_upload != NULL;
}


/**
 * Destroy the texture upload buffer
 */
void
svga_texture_transfer_map_upload_destroy(struct svga_context *svga)
{
   u_upload_destroy(svga->tex_upload);
}


/**
 * Returns true if this transfer map request can use the upload buffer.
 */
boolean
svga_texture_transfer_map_can_upload(const struct svga_screen *svgascreen,
                                     const struct pipe_resource *texture)
{
   if (svgascreen->sws->have_transfer_from_buffer_cmd == FALSE)
      return FALSE;

   /* The TransferFromBuffer command is not well supported with multisample
    * surfaces.
    */
   if (texture->nr_samples > 1)
      return FALSE;

   if (util_format_is_compressed(texture->format)) {
      /* XXX Need to take a closer look to see why texture upload
       * with 3D texture with compressed format fails
       */
      if (texture->target == PIPE_TEXTURE_3D)
         return FALSE;
   }
   else if (texture->format == PIPE_FORMAT_R9G9B9E5_FLOAT) {
      return FALSE;
   }

   return TRUE;
}


/**
 * Use upload buffer for the transfer map request.
 */
void *
svga_texture_transfer_map_upload(struct svga_context *svga,
                                 struct svga_transfer *st)
{
   struct pipe_resource *texture = st->base.resource;
   struct pipe_resource *tex_buffer = NULL;
   void *tex_map;
   unsigned nblocksx, nblocksy;
   unsigned offset;
   unsigned upload_size;

   assert(svga->tex_upload);

   st->upload.box.x = st->base.box.x;
   st->upload.box.y = st->base.box.y;
   st->upload.box.z = st->base.box.z;
   st->upload.box.w = st->base.box.width;
   st->upload.box.h = st->base.box.height;
   st->upload.box.d = st->base.box.depth;
   st->upload.nlayers = 1;

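   /* For cube and array textures the layers are addressed through
    * st->slice / nlayers rather than through the box, so collapse the
    * box's z and d dimensions accordingly.
    */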
   switch (texture->target) {
   case PIPE_TEXTURE_CUBE:
      st->upload.box.z = 0;
      break;
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_CUBE_ARRAY:
      st->upload.nlayers = st->base.box.depth;
      st->upload.box.z = 0;
      st->upload.box.d = 1;
      break;
   case PIPE_TEXTURE_1D_ARRAY:
      st->upload.nlayers = st->base.box.depth;
      st->upload.box.y = st->upload.box.z = 0;
      st->upload.box.d = 1;
      break;
   default:
      break;
   }

   nblocksx = util_format_get_nblocksx(texture->format, st->base.box.width);
   nblocksy = util_format_get_nblocksy(texture->format, st->base.box.height);

   st->base.stride = nblocksx * util_format_get_blocksize(texture->format);
   st->base.layer_stride = st->base.stride * nblocksy;

   /* In order to use the TransferFromBuffer command to update the
    * texture content from the buffer, the layer stride for a multi-layer
    * surface needs to be a multiple of 16 bytes.
    */
   if (st->upload.nlayers > 1 && st->base.layer_stride & 15)
      return NULL;

   upload_size = st->base.layer_stride * st->base.box.depth;
   upload_size = align(upload_size, 16);

#ifdef DEBUG
   if (util_format_is_compressed(texture->format)) {
      struct svga_texture *tex = svga_texture(texture);
      unsigned blockw, blockh, bytesPerBlock;

      svga_format_size(tex->key.format, &blockw, &blockh, &bytesPerBlock);

      /* dest box must start on block boundary */
      assert((st->base.box.x % blockw) == 0);
      assert((st->base.box.y % blockh) == 0);
   }
#endif

   /* If the upload size exceeds the default buffer size, the
    * upload buffer manager code will try to allocate a new buffer
    * with the new buffer size.
    */
   u_upload_alloc(svga->tex_upload, 0, upload_size, 16,
                  &offset, &tex_buffer, &tex_map);

   if (!tex_map) {
      return NULL;
   }

   st->upload.buf = tex_buffer;
   st->upload.map = tex_map;
   st->upload.offset = offset;

   return tex_map;
}


/**
 * Unmap upload map transfer request
 */
void
svga_texture_transfer_unmap_upload(struct svga_context *svga,
                                   struct svga_transfer *st)
{
   struct svga_winsys_surface *srcsurf;
   struct svga_winsys_surface *dstsurf;
   struct pipe_resource *texture = st->base.resource;
   struct svga_texture *tex = svga_texture(texture);
   enum pipe_error ret;
   unsigned subResource;
   unsigned numMipLevels;
   unsigned i, layer;
   unsigned offset = st->upload.offset;

   assert(svga->tex_upload);
   assert(st->upload.buf);

   /* unmap the texture upload buffer */
   u_upload_unmap(svga->tex_upload);

   srcsurf = svga_buffer_handle(svga, st->upload.buf, 0);
   dstsurf = svga_texture(texture)->handle;
   assert(dstsurf);

   numMipLevels = texture->last_level + 1;

   for (i = 0, layer = st->slice; i < st->upload.nlayers; i++, layer++) {
      subResource = layer * numMipLevels + st->base.level;

      /* send a transferFromBuffer command to update the host texture surface */
      assert((offset & 15) == 0);

      ret = SVGA3D_vgpu10_TransferFromBuffer(svga->swc, srcsurf,
                                             offset,
                                             st->base.stride,
                                             st->base.layer_stride,
                                             dstsurf, subResource,
                                             &st->upload.box);
      if (ret != PIPE_OK) {
         svga_context_flush(svga, NULL);
         ret = SVGA3D_vgpu10_TransferFromBuffer(svga->swc, srcsurf,
                                                offset,
                                                st->base.stride,
                                                st->base.layer_stride,
                                                dstsurf, subResource,
                                                &st->upload.box);
         assert(ret == PIPE_OK);
      }
      offset += st->base.layer_stride;

      /* Set rendered-to flag */
      svga_set_texture_rendered_to(tex, layer, st->base.level);
   }

   pipe_resource_reference(&st->upload.buf, NULL);
}


/**
 * Does the device format backing this surface have an
 * alpha channel?
 *
 * \param texture[in]  The texture whose format we're querying
 * \return TRUE if the format has an alpha channel, FALSE otherwise
 *
 * For locally created textures, the device (svga) format is typically
 * identical to svga_format(texture->format), and we can use the gallium
 * format tests to determine whether the device format has an alpha channel
 * or not.  However, for textures backed by imported svga surfaces that is
 * not always true, and we have to look at the SVGA3D utilities.
 */
boolean
svga_texture_device_format_has_alpha(struct pipe_resource *texture)
{
   /* the svga_texture() call below is invalid for PIPE_BUFFER resources */
   assert(texture->target != PIPE_BUFFER);

   enum svga3d_block_desc block_desc =
      svga3dsurface_get_desc(svga_texture(texture)->key.format)->block_desc;

   return !!(block_desc & SVGA3DBLOCKDESC_ALPHA);
}