1 /**********************************************************
2 * Copyright 2008-2009 VMware, Inc. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/
26 #include "svga3d_reg.h"
27 #include "svga3d_surfacedefs.h"
29 #include "pipe/p_state.h"
30 #include "pipe/p_defines.h"
31 #include "os/os_thread.h"
32 #include "os/os_time.h"
33 #include "util/u_format.h"
34 #include "util/u_inlines.h"
35 #include "util/u_math.h"
36 #include "util/u_memory.h"
37 #include "util/u_resource.h"
40 #include "svga_format.h"
41 #include "svga_screen.h"
42 #include "svga_context.h"
43 #include "svga_resource_texture.h"
44 #include "svga_resource_buffer.h"
45 #include "svga_sampler_view.h"
46 #include "svga_winsys.h"
47 #include "svga_debug.h"
51 svga_transfer_dma_band(struct svga_context
*svga
,
52 struct svga_transfer
*st
,
53 SVGA3dTransferType transfer
,
54 unsigned y
, unsigned h
, unsigned srcy
,
55 SVGA3dSurfaceDMAFlags flags
)
57 struct svga_texture
*texture
= svga_texture(st
->base
.resource
);
61 assert(!st
->use_direct_map
);
63 box
.x
= st
->base
.box
.x
;
65 box
.z
= st
->base
.box
.z
;
66 box
.w
= st
->base
.box
.width
;
73 SVGA_DBG(DEBUG_DMA
, "dma %s sid %p, face %u, (%u, %u, %u) - "
74 "(%u, %u, %u), %ubpp\n",
75 transfer
== SVGA3D_WRITE_HOST_VRAM
? "to" : "from",
81 st
->base
.box
.x
+ st
->base
.box
.width
,
84 util_format_get_blocksize(texture
->b
.b
.format
) * 8 /
85 (util_format_get_blockwidth(texture
->b
.b
.format
)
86 * util_format_get_blockheight(texture
->b
.b
.format
)));
88 ret
= SVGA3D_SurfaceDMA(svga
->swc
, st
, transfer
, &box
, 1, flags
);
90 svga_context_flush(svga
, NULL
);
91 ret
= SVGA3D_SurfaceDMA(svga
->swc
, st
, transfer
, &box
, 1, flags
);
92 assert(ret
== PIPE_OK
);
98 svga_transfer_dma(struct svga_context
*svga
,
99 struct svga_transfer
*st
,
100 SVGA3dTransferType transfer
,
101 SVGA3dSurfaceDMAFlags flags
)
103 struct svga_texture
*texture
= svga_texture(st
->base
.resource
);
104 struct svga_screen
*screen
= svga_screen(texture
->b
.b
.screen
);
105 struct svga_winsys_screen
*sws
= screen
->sws
;
106 struct pipe_fence_handle
*fence
= NULL
;
108 assert(!st
->use_direct_map
);
110 if (transfer
== SVGA3D_READ_HOST_VRAM
) {
111 SVGA_DBG(DEBUG_PERF
, "%s: readback transfer\n", __FUNCTION__
);
114 /* Ensure any pending operations on host surfaces are queued on the command
117 svga_surfaces_flush( svga
);
120 /* Do the DMA transfer in a single go */
121 svga_transfer_dma_band(svga
, st
, transfer
,
122 st
->base
.box
.y
, st
->base
.box
.height
, 0,
125 if (transfer
== SVGA3D_READ_HOST_VRAM
) {
126 svga_context_flush(svga
, &fence
);
127 sws
->fence_finish(sws
, fence
, 0);
128 sws
->fence_reference(sws
, &fence
, NULL
);
133 unsigned blockheight
=
134 util_format_get_blockheight(st
->base
.resource
->format
);
136 h
= st
->hw_nblocksy
* blockheight
;
139 for (y
= 0; y
< st
->base
.box
.height
; y
+= h
) {
140 unsigned offset
, length
;
143 if (y
+ h
> st
->base
.box
.height
)
144 h
= st
->base
.box
.height
- y
;
146 /* Transfer band must be aligned to pixel block boundaries */
147 assert(y
% blockheight
== 0);
148 assert(h
% blockheight
== 0);
150 offset
= y
* st
->base
.stride
/ blockheight
;
151 length
= h
* st
->base
.stride
/ blockheight
;
153 sw
= (uint8_t *) st
->swbuf
+ offset
;
155 if (transfer
== SVGA3D_WRITE_HOST_VRAM
) {
156 unsigned usage
= PIPE_TRANSFER_WRITE
;
158 /* Wait for the previous DMAs to complete */
159 /* TODO: keep one DMA (at half the size) in the background */
161 svga_context_flush(svga
, NULL
);
162 usage
|= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE
;
165 hw
= sws
->buffer_map(sws
, st
->hwbuf
, usage
);
168 memcpy(hw
, sw
, length
);
169 sws
->buffer_unmap(sws
, st
->hwbuf
);
173 svga_transfer_dma_band(svga
, st
, transfer
, y
, h
, srcy
, flags
);
176 * Prevent the texture contents to be discarded on the next band
179 flags
.discard
= FALSE
;
181 if (transfer
== SVGA3D_READ_HOST_VRAM
) {
182 svga_context_flush(svga
, &fence
);
183 sws
->fence_finish(sws
, fence
, 0);
185 hw
= sws
->buffer_map(sws
, st
->hwbuf
, PIPE_TRANSFER_READ
);
188 memcpy(sw
, hw
, length
);
189 sws
->buffer_unmap(sws
, st
->hwbuf
);
198 svga_texture_get_handle(struct pipe_screen
*screen
,
199 struct pipe_resource
*texture
,
200 struct winsys_handle
*whandle
)
202 struct svga_winsys_screen
*sws
= svga_winsys_screen(texture
->screen
);
205 assert(svga_texture(texture
)->key
.cachable
== 0);
206 svga_texture(texture
)->key
.cachable
= 0;
208 stride
= util_format_get_nblocksx(texture
->format
, texture
->width0
) *
209 util_format_get_blocksize(texture
->format
);
211 return sws
->surface_get_handle(sws
, svga_texture(texture
)->handle
,
217 svga_texture_destroy(struct pipe_screen
*screen
,
218 struct pipe_resource
*pt
)
220 struct svga_screen
*ss
= svga_screen(screen
);
221 struct svga_texture
*tex
= svga_texture(pt
);
223 ss
->texture_timestamp
++;
225 svga_sampler_view_reference(&tex
->cached_view
, NULL
);
228 DBG("%s deleting %p\n", __FUNCTION__, (void *) tex);
230 SVGA_DBG(DEBUG_DMA
, "unref sid %p (texture)\n", tex
->handle
);
231 svga_screen_surface_destroy(ss
, &tex
->key
, &tex
->handle
);
233 ss
->hud
.total_resource_bytes
-= tex
->size
;
236 FREE(tex
->rendered_to
);
239 assert(ss
->hud
.num_resources
> 0);
240 if (ss
->hud
.num_resources
> 0)
241 ss
->hud
.num_resources
--;
246 * Determine if we need to read back a texture image before mapping it.
249 need_tex_readback(struct pipe_transfer
*transfer
)
251 struct svga_texture
*t
= svga_texture(transfer
->resource
);
253 if (transfer
->usage
& PIPE_TRANSFER_READ
)
256 if ((transfer
->usage
& PIPE_TRANSFER_WRITE
) &&
257 ((transfer
->usage
& PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE
) == 0)) {
260 if (transfer
->resource
->target
== PIPE_TEXTURE_CUBE
) {
261 assert(transfer
->box
.depth
== 1);
262 face
= transfer
->box
.z
;
267 if (svga_was_texture_rendered_to(t
, face
, transfer
->level
)) {
276 static enum pipe_error
277 readback_image_vgpu9(struct svga_context
*svga
,
278 struct svga_winsys_surface
*surf
,
284 ret
= SVGA3D_ReadbackGBImage(svga
->swc
, surf
, slice
, level
);
285 if (ret
!= PIPE_OK
) {
286 svga_context_flush(svga
, NULL
);
287 ret
= SVGA3D_ReadbackGBImage(svga
->swc
, surf
, slice
, level
);
293 static enum pipe_error
294 readback_image_vgpu10(struct svga_context
*svga
,
295 struct svga_winsys_surface
*surf
,
298 unsigned numMipLevels
)
301 unsigned subResource
;
303 subResource
= slice
* numMipLevels
+ level
;
304 ret
= SVGA3D_vgpu10_ReadbackSubResource(svga
->swc
, surf
, subResource
);
305 if (ret
!= PIPE_OK
) {
306 svga_context_flush(svga
, NULL
);
307 ret
= SVGA3D_vgpu10_ReadbackSubResource(svga
->swc
, surf
, subResource
);
314 svga_texture_transfer_map(struct pipe_context
*pipe
,
315 struct pipe_resource
*texture
,
318 const struct pipe_box
*box
,
319 struct pipe_transfer
**ptransfer
)
321 struct svga_context
*svga
= svga_context(pipe
);
322 struct svga_screen
*ss
= svga_screen(pipe
->screen
);
323 struct svga_winsys_screen
*sws
= ss
->sws
;
324 struct svga_texture
*tex
= svga_texture(texture
);
325 struct svga_transfer
*st
;
326 unsigned nblocksx
, nblocksy
;
327 boolean use_direct_map
= svga_have_gb_objects(svga
) &&
328 !svga_have_gb_dma(svga
);
331 int64_t begin
= os_time_get();
333 /* We can't map texture storage directly unless we have GB objects */
334 if (usage
& PIPE_TRANSFER_MAP_DIRECTLY
) {
335 if (svga_have_gb_objects(svga
))
336 use_direct_map
= TRUE
;
341 st
= CALLOC_STRUCT(svga_transfer
);
347 if (use_direct_map
) {
348 /* we'll directly access the guest-backed surface */
349 w
= u_minify(texture
->width0
, level
);
350 h
= u_minify(texture
->height0
, level
);
351 d
= u_minify(texture
->depth0
, level
);
354 /* we'll put the data into a tightly packed buffer */
359 nblocksx
= util_format_get_nblocksx(texture
->format
, w
);
360 nblocksy
= util_format_get_nblocksy(texture
->format
, h
);
363 pipe_resource_reference(&st
->base
.resource
, texture
);
365 st
->base
.level
= level
;
366 st
->base
.usage
= usage
;
368 st
->base
.stride
= nblocksx
*util_format_get_blocksize(texture
->format
);
369 st
->base
.layer_stride
= st
->base
.stride
* nblocksy
;
371 switch (tex
->b
.b
.target
) {
372 case PIPE_TEXTURE_CUBE
:
373 case PIPE_TEXTURE_2D_ARRAY
:
374 case PIPE_TEXTURE_1D_ARRAY
:
375 st
->slice
= st
->base
.box
.z
;
376 st
->base
.box
.z
= 0; /* so we don't apply double offsets below */
383 if (!use_direct_map
) {
384 /* Use a DMA buffer */
385 st
->hw_nblocksy
= nblocksy
;
387 st
->hwbuf
= svga_winsys_buffer_create(svga
, 1, 0,
388 st
->hw_nblocksy
* st
->base
.stride
* d
);
389 while(!st
->hwbuf
&& (st
->hw_nblocksy
/= 2)) {
390 st
->hwbuf
= svga_winsys_buffer_create(svga
, 1, 0,
391 st
->hw_nblocksy
* st
->base
.stride
* d
);
399 if (st
->hw_nblocksy
< nblocksy
) {
400 /* We couldn't allocate a hardware buffer big enough for the transfer,
401 * so allocate regular malloc memory instead */
403 debug_printf("%s: failed to allocate %u KB of DMA, "
404 "splitting into %u x %u KB DMA transfers\n",
406 (nblocksy
*st
->base
.stride
+ 1023)/1024,
407 (nblocksy
+ st
->hw_nblocksy
- 1)/st
->hw_nblocksy
,
408 (st
->hw_nblocksy
*st
->base
.stride
+ 1023)/1024);
411 st
->swbuf
= MALLOC(nblocksy
* st
->base
.stride
* d
);
413 sws
->buffer_destroy(sws
, st
->hwbuf
);
419 if (usage
& PIPE_TRANSFER_READ
) {
420 SVGA3dSurfaceDMAFlags flags
;
421 memset(&flags
, 0, sizeof flags
);
422 svga_transfer_dma(svga
, st
, SVGA3D_READ_HOST_VRAM
, flags
);
425 struct pipe_transfer
*transfer
= &st
->base
;
426 struct svga_winsys_surface
*surf
= tex
->handle
;
433 if (need_tex_readback(transfer
)) {
436 svga_surfaces_flush(svga
);
438 if (svga_have_vgpu10(svga
)) {
439 ret
= readback_image_vgpu10(svga
, surf
, st
->slice
, transfer
->level
,
440 tex
->b
.b
.last_level
+ 1);
442 ret
= readback_image_vgpu9(svga
, surf
, st
->slice
, transfer
->level
);
445 assert(ret
== PIPE_OK
);
448 svga_context_flush(svga
, NULL
);
451 * Note: if PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE were specified
452 * we could potentially clear the flag for all faces/layers/mips.
454 svga_clear_texture_rendered_to(tex
, st
->slice
, transfer
->level
);
457 assert(transfer
->usage
& PIPE_TRANSFER_WRITE
);
458 if ((transfer
->usage
& PIPE_TRANSFER_UNSYNCHRONIZED
) == 0) {
459 svga_surfaces_flush(svga
);
460 if (!sws
->surface_is_flushed(sws
, surf
))
461 svga_context_flush(svga
, NULL
);
466 st
->use_direct_map
= use_direct_map
;
468 *ptransfer
= &st
->base
;
474 returnVal
= st
->swbuf
;
476 else if (!st
->use_direct_map
) {
477 returnVal
= sws
->buffer_map(sws
, st
->hwbuf
, usage
);
480 SVGA3dSize baseLevelSize
;
481 struct svga_texture
*tex
= svga_texture(texture
);
482 struct svga_winsys_surface
*surf
= tex
->handle
;
485 unsigned offset
, mip_width
, mip_height
;
486 unsigned xoffset
= st
->base
.box
.x
;
487 unsigned yoffset
= st
->base
.box
.y
;
488 unsigned zoffset
= st
->base
.box
.z
;
490 map
= svga
->swc
->surface_map(svga
->swc
, surf
, usage
, &retry
);
491 if (map
== NULL
&& retry
) {
493 * At this point, the svga_surfaces_flush() should already have
494 * called in svga_texture_get_transfer().
496 svga_context_flush(svga
, NULL
);
497 map
= svga
->swc
->surface_map(svga
->swc
, surf
, usage
, &retry
);
501 * Make sure we return NULL if the map fails
509 * Compute the offset to the specific texture slice in the buffer.
511 baseLevelSize
.width
= tex
->b
.b
.width0
;
512 baseLevelSize
.height
= tex
->b
.b
.height0
;
513 baseLevelSize
.depth
= tex
->b
.b
.depth0
;
515 offset
= svga3dsurface_get_image_offset(tex
->key
.format
, baseLevelSize
,
516 tex
->b
.b
.last_level
+ 1, /* numMips */
522 mip_width
= u_minify(tex
->b
.b
.width0
, level
);
523 mip_height
= u_minify(tex
->b
.b
.height0
, level
);
525 offset
+= svga3dsurface_get_pixel_offset(tex
->key
.format
,
526 mip_width
, mip_height
,
527 xoffset
, yoffset
, zoffset
);
528 returnVal
= (void *) (map
+ offset
);
531 svga
->hud
.map_buffer_time
+= (os_time_get() - begin
);
532 svga
->hud
.num_resources_mapped
++;
539 * Unmap a GB texture surface.
542 svga_texture_surface_unmap(struct svga_context
*svga
,
543 struct pipe_transfer
*transfer
)
545 struct svga_winsys_surface
*surf
= svga_texture(transfer
->resource
)->handle
;
546 struct svga_winsys_context
*swc
= svga
->swc
;
551 swc
->surface_unmap(swc
, surf
, &rebind
);
554 ret
= SVGA3D_BindGBSurface(swc
, surf
);
555 if (ret
!= PIPE_OK
) {
556 /* flush and retry */
557 svga_context_flush(svga
, NULL
);
558 ret
= SVGA3D_BindGBSurface(swc
, surf
);
559 assert(ret
== PIPE_OK
);
565 static enum pipe_error
566 update_image_vgpu9(struct svga_context
*svga
,
567 struct svga_winsys_surface
*surf
,
568 const SVGA3dBox
*box
,
574 ret
= SVGA3D_UpdateGBImage(svga
->swc
, surf
, box
, slice
, level
);
575 if (ret
!= PIPE_OK
) {
576 svga_context_flush(svga
, NULL
);
577 ret
= SVGA3D_UpdateGBImage(svga
->swc
, surf
, box
, slice
, level
);
583 static enum pipe_error
584 update_image_vgpu10(struct svga_context
*svga
,
585 struct svga_winsys_surface
*surf
,
586 const SVGA3dBox
*box
,
589 unsigned numMipLevels
)
592 unsigned subResource
;
594 subResource
= slice
* numMipLevels
+ level
;
595 ret
= SVGA3D_vgpu10_UpdateSubResource(svga
->swc
, surf
, box
, subResource
);
596 if (ret
!= PIPE_OK
) {
597 svga_context_flush(svga
, NULL
);
598 ret
= SVGA3D_vgpu10_UpdateSubResource(svga
->swc
, surf
, box
, subResource
);
605 svga_texture_transfer_unmap(struct pipe_context
*pipe
,
606 struct pipe_transfer
*transfer
)
608 struct svga_context
*svga
= svga_context(pipe
);
609 struct svga_screen
*ss
= svga_screen(pipe
->screen
);
610 struct svga_winsys_screen
*sws
= ss
->sws
;
611 struct svga_transfer
*st
= svga_transfer(transfer
);
612 struct svga_texture
*tex
= svga_texture(transfer
->resource
);
615 if (st
->use_direct_map
) {
616 svga_texture_surface_unmap(svga
, transfer
);
619 sws
->buffer_unmap(sws
, st
->hwbuf
);
623 if (!st
->use_direct_map
&& (st
->base
.usage
& PIPE_TRANSFER_WRITE
)) {
624 /* Use DMA to transfer texture data */
625 SVGA3dSurfaceDMAFlags flags
;
627 memset(&flags
, 0, sizeof flags
);
628 if (transfer
->usage
& PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE
) {
629 flags
.discard
= TRUE
;
631 if (transfer
->usage
& PIPE_TRANSFER_UNSYNCHRONIZED
) {
632 flags
.unsynchronized
= TRUE
;
635 svga_transfer_dma(svga
, st
, SVGA3D_WRITE_HOST_VRAM
, flags
);
636 } else if (transfer
->usage
& PIPE_TRANSFER_WRITE
) {
637 struct svga_winsys_surface
*surf
=
638 svga_texture(transfer
->resource
)->handle
;
642 assert(svga_have_gb_objects(svga
));
644 /* update the effected region */
645 box
.x
= transfer
->box
.x
;
646 box
.y
= transfer
->box
.y
;
647 switch (tex
->b
.b
.target
) {
648 case PIPE_TEXTURE_CUBE
:
649 case PIPE_TEXTURE_2D_ARRAY
:
652 case PIPE_TEXTURE_1D_ARRAY
:
656 box
.z
= transfer
->box
.z
;
659 box
.w
= transfer
->box
.width
;
660 box
.h
= transfer
->box
.height
;
661 box
.d
= transfer
->box
.depth
;
664 debug_printf("%s %d, %d, %d %d x %d x %d\n",
667 box
.w
, box
.h
, box
.d
);
669 if (svga_have_vgpu10(svga
)) {
670 ret
= update_image_vgpu10(svga
, surf
, &box
, st
->slice
, transfer
->level
,
671 tex
->b
.b
.last_level
+ 1);
673 ret
= update_image_vgpu9(svga
, surf
, &box
, st
->slice
, transfer
->level
);
676 assert(ret
== PIPE_OK
);
680 ss
->texture_timestamp
++;
681 svga_age_texture_view(tex
, transfer
->level
);
682 if (transfer
->resource
->target
== PIPE_TEXTURE_CUBE
)
683 svga_define_texture_level(tex
, st
->slice
, transfer
->level
);
685 svga_define_texture_level(tex
, 0, transfer
->level
);
687 pipe_resource_reference(&st
->base
.resource
, NULL
);
690 if (!st
->use_direct_map
) {
691 sws
->buffer_destroy(sws
, st
->hwbuf
);
698 * Does format store depth values?
700 static inline boolean
701 format_has_depth(enum pipe_format format
)
703 const struct util_format_description
*desc
= util_format_description(format
);
704 return util_format_has_depth(desc
);
708 struct u_resource_vtbl svga_texture_vtbl
=
710 svga_texture_get_handle
, /* get_handle */
711 svga_texture_destroy
, /* resource_destroy */
712 svga_texture_transfer_map
, /* transfer_map */
713 u_default_transfer_flush_region
, /* transfer_flush_region */
714 svga_texture_transfer_unmap
, /* transfer_unmap */
715 u_default_transfer_inline_write
/* transfer_inline_write */
719 struct pipe_resource
*
720 svga_texture_create(struct pipe_screen
*screen
,
721 const struct pipe_resource
*template)
723 struct svga_screen
*svgascreen
= svga_screen(screen
);
724 struct svga_texture
*tex
;
725 unsigned bindings
= template->bind
;
727 assert(template->last_level
< SVGA_MAX_TEXTURE_LEVELS
);
728 if (template->last_level
>= SVGA_MAX_TEXTURE_LEVELS
) {
732 tex
= CALLOC_STRUCT(svga_texture
);
737 tex
->defined
= CALLOC(template->depth0
* template->array_size
,
738 sizeof(tex
->defined
[0]));
744 tex
->rendered_to
= CALLOC(template->depth0
* template->array_size
,
745 sizeof(tex
->rendered_to
[0]));
746 if (!tex
->rendered_to
) {
752 tex
->b
.b
= *template;
753 tex
->b
.vtbl
= &svga_texture_vtbl
;
754 pipe_reference_init(&tex
->b
.b
.reference
, 1);
755 tex
->b
.b
.screen
= screen
;
758 tex
->key
.size
.width
= template->width0
;
759 tex
->key
.size
.height
= template->height0
;
760 tex
->key
.size
.depth
= template->depth0
;
761 tex
->key
.arraySize
= 1;
762 tex
->key
.numFaces
= 1;
763 tex
->key
.sampleCount
= template->nr_samples
;
765 if (template->nr_samples
> 1) {
766 tex
->key
.flags
|= SVGA3D_SURFACE_MASKABLE_ANTIALIAS
;
769 if (svgascreen
->sws
->have_vgpu10
) {
770 switch (template->target
) {
771 case PIPE_TEXTURE_1D
:
772 tex
->key
.flags
|= SVGA3D_SURFACE_1D
;
774 case PIPE_TEXTURE_1D_ARRAY
:
775 tex
->key
.flags
|= SVGA3D_SURFACE_1D
;
777 case PIPE_TEXTURE_2D_ARRAY
:
778 tex
->key
.flags
|= SVGA3D_SURFACE_ARRAY
;
779 tex
->key
.arraySize
= template->array_size
;
781 case PIPE_TEXTURE_3D
:
782 tex
->key
.flags
|= SVGA3D_SURFACE_VOLUME
;
784 case PIPE_TEXTURE_CUBE
:
785 tex
->key
.flags
|= (SVGA3D_SURFACE_CUBEMAP
| SVGA3D_SURFACE_ARRAY
);
786 tex
->key
.numFaces
= 6;
793 switch (template->target
) {
794 case PIPE_TEXTURE_3D
:
795 tex
->key
.flags
|= SVGA3D_SURFACE_VOLUME
;
797 case PIPE_TEXTURE_CUBE
:
798 tex
->key
.flags
|= SVGA3D_SURFACE_CUBEMAP
;
799 tex
->key
.numFaces
= 6;
806 tex
->key
.cachable
= 1;
808 if (bindings
& PIPE_BIND_SAMPLER_VIEW
) {
809 tex
->key
.flags
|= SVGA3D_SURFACE_HINT_TEXTURE
;
810 tex
->key
.flags
|= SVGA3D_SURFACE_BIND_SHADER_RESOURCE
;
812 if (!(bindings
& PIPE_BIND_RENDER_TARGET
)) {
813 /* Also check if the format is renderable */
814 if (screen
->is_format_supported(screen
, template->format
,
816 template->nr_samples
,
817 PIPE_BIND_RENDER_TARGET
)) {
818 bindings
|= PIPE_BIND_RENDER_TARGET
;
823 if (bindings
& PIPE_BIND_DISPLAY_TARGET
) {
824 tex
->key
.cachable
= 0;
827 if (bindings
& PIPE_BIND_SHARED
) {
828 tex
->key
.cachable
= 0;
831 if (bindings
& (PIPE_BIND_SCANOUT
| PIPE_BIND_CURSOR
)) {
832 tex
->key
.scanout
= 1;
833 tex
->key
.cachable
= 0;
837 * Note: Previously we never passed the
838 * SVGA3D_SURFACE_HINT_RENDERTARGET hint. Mesa cannot
839 * know beforehand whether a texture will be used as a rendertarget or not
840 * and it always requests PIPE_BIND_RENDER_TARGET, therefore
841 * passing the SVGA3D_SURFACE_HINT_RENDERTARGET here defeats its purpose.
843 * However, this was changed since other state trackers
844 * (XA for example) uses it accurately and certain device versions
845 * relies on it in certain situations to render correctly.
847 if ((bindings
& PIPE_BIND_RENDER_TARGET
) &&
848 !util_format_is_s3tc(template->format
)) {
849 tex
->key
.flags
|= SVGA3D_SURFACE_HINT_RENDERTARGET
;
850 tex
->key
.flags
|= SVGA3D_SURFACE_BIND_RENDER_TARGET
;
853 if (bindings
& PIPE_BIND_DEPTH_STENCIL
) {
854 tex
->key
.flags
|= SVGA3D_SURFACE_HINT_DEPTHSTENCIL
;
855 tex
->key
.flags
|= SVGA3D_SURFACE_BIND_DEPTH_STENCIL
;
858 tex
->key
.numMipLevels
= template->last_level
+ 1;
860 tex
->key
.format
= svga_translate_format(svgascreen
, template->format
,
862 if (tex
->key
.format
== SVGA3D_FORMAT_INVALID
) {
864 FREE(tex
->rendered_to
);
869 /* Use typeless formats for sRGB and depth resources. Typeless
870 * formats can be reinterpreted as other formats. For example,
871 * SVGA3D_R8G8B8A8_UNORM_TYPELESS can be interpreted as
872 * SVGA3D_R8G8B8A8_UNORM_SRGB or SVGA3D_R8G8B8A8_UNORM.
874 if (svgascreen
->sws
->have_vgpu10
&&
875 (util_format_is_srgb(template->format
) ||
876 format_has_depth(template->format
))) {
877 SVGA3dSurfaceFormat typeless
= svga_typeless_format(tex
->key
.format
);
879 debug_printf("Convert resource type %s -> %s (bind 0x%x)\n",
880 svga_format_name(tex
->key
.format
),
881 svga_format_name(typeless
),
884 tex
->key
.format
= typeless
;
887 SVGA_DBG(DEBUG_DMA
, "surface_create for texture\n", tex
->handle
);
888 tex
->handle
= svga_screen_surface_create(svgascreen
, bindings
,
889 tex
->b
.b
.usage
, &tex
->key
);
892 FREE(tex
->rendered_to
);
897 SVGA_DBG(DEBUG_DMA
, " --> got sid %p (texture)\n", tex
->handle
);
899 debug_reference(&tex
->b
.b
.reference
,
900 (debug_reference_descriptor
)debug_describe_resource
, 0);
902 tex
->size
= util_resource_size(template);
903 svgascreen
->hud
.total_resource_bytes
+= tex
->size
;
904 svgascreen
->hud
.num_resources
++;
910 struct pipe_resource
*
911 svga_texture_from_handle(struct pipe_screen
*screen
,
912 const struct pipe_resource
*template,
913 struct winsys_handle
*whandle
)
915 struct svga_winsys_screen
*sws
= svga_winsys_screen(screen
);
916 struct svga_screen
*ss
= svga_screen(screen
);
917 struct svga_winsys_surface
*srf
;
918 struct svga_texture
*tex
;
919 enum SVGA3dSurfaceFormat format
= 0;
922 /* Only supports one type */
923 if ((template->target
!= PIPE_TEXTURE_2D
&&
924 template->target
!= PIPE_TEXTURE_RECT
) ||
925 template->last_level
!= 0 ||
926 template->depth0
!= 1) {
930 srf
= sws
->surface_from_handle(sws
, whandle
, &format
);
935 if (svga_translate_format(svga_screen(screen
), template->format
,
936 template->bind
) != format
) {
937 unsigned f1
= svga_translate_format(svga_screen(screen
),
938 template->format
, template->bind
);
939 unsigned f2
= format
;
941 /* It's okay for XRGB and ARGB or depth with/out stencil to get mixed up.
943 if (f1
== SVGA3D_B8G8R8A8_UNORM
)
944 f1
= SVGA3D_A8R8G8B8
;
945 if (f1
== SVGA3D_B8G8R8X8_UNORM
)
946 f1
= SVGA3D_X8R8G8B8
;
948 if ( !( (f1
== f2
) ||
949 (f1
== SVGA3D_X8R8G8B8
&& f2
== SVGA3D_A8R8G8B8
) ||
950 (f1
== SVGA3D_X8R8G8B8
&& f2
== SVGA3D_B8G8R8X8_UNORM
) ||
951 (f1
== SVGA3D_A8R8G8B8
&& f2
== SVGA3D_X8R8G8B8
) ||
952 (f1
== SVGA3D_A8R8G8B8
&& f2
== SVGA3D_B8G8R8A8_UNORM
) ||
953 (f1
== SVGA3D_Z_D24X8
&& f2
== SVGA3D_Z_D24S8
) ||
954 (f1
== SVGA3D_Z_DF24
&& f2
== SVGA3D_Z_D24S8_INT
) ) ) {
955 debug_printf("%s wrong format %s != %s\n", __FUNCTION__
,
956 svga_format_name(f1
), svga_format_name(f2
));
961 tex
= CALLOC_STRUCT(svga_texture
);
965 tex
->defined
= CALLOC(template->depth0
* template->array_size
,
966 sizeof(tex
->defined
[0]));
972 tex
->b
.b
= *template;
973 tex
->b
.vtbl
= &svga_texture_vtbl
;
974 pipe_reference_init(&tex
->b
.b
.reference
, 1);
975 tex
->b
.b
.screen
= screen
;
977 SVGA_DBG(DEBUG_DMA
, "wrap surface sid %p\n", srf
);
979 tex
->key
.cachable
= 0;
980 tex
->key
.format
= format
;
983 tex
->rendered_to
= CALLOC(1, sizeof(tex
->rendered_to
[0]));
984 tex
->imported
= TRUE
;
986 ss
->hud
.num_resources
++;