1 /**********************************************************
2 * Copyright 2008-2009 VMware, Inc. All rights reserved.
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
24 **********************************************************/
26 #include "svga3d_reg.h"
27 #include "svga3d_surfacedefs.h"
29 #include "pipe/p_state.h"
30 #include "pipe/p_defines.h"
31 #include "os/os_thread.h"
32 #include "os/os_time.h"
33 #include "util/u_format.h"
34 #include "util/u_inlines.h"
35 #include "util/u_math.h"
36 #include "util/u_memory.h"
37 #include "util/u_resource.h"
40 #include "svga_format.h"
41 #include "svga_screen.h"
42 #include "svga_context.h"
43 #include "svga_resource_texture.h"
44 #include "svga_resource_buffer.h"
45 #include "svga_sampler_view.h"
46 #include "svga_winsys.h"
47 #include "svga_debug.h"
/* Issue one band of a surface DMA transfer (guest <-> host VRAM) for the
 * given sub-box of the transfer.  Only used on the non-direct-map path
 * (see the assert below).
 * NOTE(excerpt): the return-type line, the box setup, and the
 * "if (ret != PIPE_OK)" retry guard are missing from this excerpt.
 */
svga_transfer_dma_band(struct svga_context *svga,
                       struct svga_transfer *st,
                       SVGA3dTransferType transfer,
                       unsigned x, unsigned y, unsigned z,
                       unsigned w, unsigned h, unsigned d,
                       unsigned srcx, unsigned srcy, unsigned srcz,
                       SVGA3dSurfaceDMAFlags flags)
struct svga_texture *texture = svga_texture(st->base.resource);
/* DMA path is mutually exclusive with direct surface mapping */
assert(!st->use_direct_map);
/* Debug trace: direction, surface id and bits-per-pixel of the band */
SVGA_DBG(DEBUG_DMA, "dma %s sid %p, face %u, (%u, %u, %u) - "
         "(%u, %u, %u), %ubpp\n",
         transfer == SVGA3D_WRITE_HOST_VRAM ? "to" : "from",
         util_format_get_blocksize(texture->b.b.format) * 8 /
         (util_format_get_blockwidth(texture->b.b.format)
          * util_format_get_blockheight(texture->b.b.format)));
ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
/* On failure (command buffer full), flush and retry once */
svga_context_flush(svga, NULL);
ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
assert(ret == PIPE_OK);
/* Perform a complete surface DMA transfer for the given svga_transfer,
 * either in a single band (when the whole box fits the hardware buffer)
 * or split into horizontal bands of st->hw_nblocksy block rows each,
 * staging each band through st->hwbuf (and st->swbuf when malloc'ed).
 * NOTE(excerpt): several lines (braces, else branches, some trailing
 * arguments) are missing from this excerpt.
 */
svga_transfer_dma(struct svga_context *svga,
                  struct svga_transfer *st,
                  SVGA3dTransferType transfer,
                  SVGA3dSurfaceDMAFlags flags)
struct svga_texture *texture = svga_texture(st->base.resource);
struct svga_screen *screen = svga_screen(texture->b.b.screen);
struct svga_winsys_screen *sws = screen->sws;
struct pipe_fence_handle *fence = NULL;
assert(!st->use_direct_map);
if (transfer == SVGA3D_READ_HOST_VRAM) {
   SVGA_DBG(DEBUG_PERF, "%s: readback transfer\n", __FUNCTION__);
/* Ensure any pending operations on host surfaces are queued on the command
 */
svga_surfaces_flush( svga );
/* Do the DMA transfer in a single go */
svga_transfer_dma_band(svga, st, transfer,
                       st->base.box.x, st->base.box.y, st->base.box.z,
                       st->base.box.width, st->base.box.height, st->base.box.depth,
/* For readbacks, wait for the DMA to land before the caller maps the buffer */
if (transfer == SVGA3D_READ_HOST_VRAM) {
   svga_context_flush(svga, &fence);
   sws->fence_finish(sws, fence, 0);
   sws->fence_reference(sws, &fence, NULL);
/* Banded path: split the transfer into rows of whole pixel blocks */
unsigned blockheight =
   util_format_get_blockheight(st->base.resource->format);
h = st->hw_nblocksy * blockheight;
for (y = 0; y < st->base.box.height; y += h) {
   unsigned offset, length;
   /* Clamp the last band to the remaining height */
   if (y + h > st->base.box.height)
      h = st->base.box.height - y;
   /* Transfer band must be aligned to pixel block boundaries */
   assert(y % blockheight == 0);
   assert(h % blockheight == 0);
   /* Byte offset/length of this band within the software staging buffer */
   offset = y * st->base.stride / blockheight;
   length = h * st->base.stride / blockheight;
   sw = (uint8_t *) st->swbuf + offset;
   if (transfer == SVGA3D_WRITE_HOST_VRAM) {
      unsigned usage = PIPE_TRANSFER_WRITE;
      /* Wait for the previous DMAs to complete */
      /* TODO: keep one DMA (at half the size) in the background */
      svga_context_flush(svga, NULL);
      usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
      /* Copy this band from the malloc buffer into the hardware buffer */
      hw = sws->buffer_map(sws, st->hwbuf, usage);
      memcpy(hw, sw, length);
      sws->buffer_unmap(sws, st->hwbuf);
   svga_transfer_dma_band(svga, st, transfer,
                          st->base.box.x, y, st->base.box.z,
                          st->base.box.width, h, st->base.box.depth,
   /*
    * Prevent the texture contents to be discarded on the next band
    */
   flags.discard = FALSE;
   if (transfer == SVGA3D_READ_HOST_VRAM) {
      /* Wait for this band's readback, then copy it out to the sw buffer */
      svga_context_flush(svga, &fence);
      sws->fence_finish(sws, fence, 0);
      hw = sws->buffer_map(sws, st->hwbuf, PIPE_TRANSFER_READ);
      memcpy(sw, hw, length);
      sws->buffer_unmap(sws, st->hwbuf);
/* Export a texture's underlying winsys surface as a shareable handle.
 * Shared surfaces must not live in the host surface cache, so cachable
 * is forced to 0 here.
 * NOTE(excerpt): the return-type line and the tail of the
 * surface_get_handle() call are missing from this excerpt.
 */
svga_texture_get_handle(struct pipe_screen *screen,
                        struct pipe_resource *texture,
                        struct winsys_handle *whandle)
struct svga_winsys_screen *sws = svga_winsys_screen(texture->screen);
/* Exported surfaces are expected to already be non-cachable */
assert(svga_texture(texture)->key.cachable == 0);
svga_texture(texture)->key.cachable = 0;
/* Row pitch in bytes of mip level 0 */
stride = util_format_get_nblocksx(texture->format, texture->width0) *
         util_format_get_blocksize(texture->format);
return sws->surface_get_handle(sws, svga_texture(texture)->handle,
/* pipe_screen::resource_destroy for textures: drop the cached sampler
 * view, release the host surface (back to the surface cache when
 * cachable), update HUD accounting and free per-texture tracking arrays.
 * NOTE(excerpt): frees of the other tracking arrays and of tex itself
 * are missing from this excerpt.
 */
svga_texture_destroy(struct pipe_screen *screen,
                     struct pipe_resource *pt)
struct svga_screen *ss = svga_screen(screen);
struct svga_texture *tex = svga_texture(pt);
/* Invalidate sampler views derived from this texture */
ss->texture_timestamp++;
svga_sampler_view_reference(&tex->cached_view, NULL);
DBG("%s deleting %p\n", __FUNCTION__, (void *) tex);
SVGA_DBG(DEBUG_DMA, "unref sid %p (texture)\n", tex->handle);
svga_screen_surface_destroy(ss, &tex->key, &tex->handle);
/* HUD bookkeeping */
ss->hud.total_resource_bytes -= tex->size;
FREE(tex->rendered_to);
assert(ss->hud.num_resources > 0);
if (ss->hud.num_resources > 0)
   ss->hud.num_resources--;
/*
 * Determine if we need to read back a texture image before mapping it.
 * True when the map reads, or when it writes without discarding the whole
 * resource, and the image was previously rendered to on the host.
 * NOTE(excerpt): return statements and some declarations (e.g. 'face')
 * are missing from this excerpt.
 */
need_tex_readback(struct pipe_transfer *transfer)
struct svga_texture *t = svga_texture(transfer->resource);
if (transfer->usage & PIPE_TRANSFER_READ)
/* A non-discarding write also needs the current contents preserved */
if ((transfer->usage & PIPE_TRANSFER_WRITE) &&
    ((transfer->usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) == 0)) {
/* For cube maps the face is encoded in box.z */
if (transfer->resource->target == PIPE_TEXTURE_CUBE) {
   assert(transfer->box.depth == 1);
   face = transfer->box.z;
if (svga_was_texture_rendered_to(t, face, transfer->level)) {
/* Issue a VGPU9 guest-backed-image readback for one slice/level,
 * flushing and retrying once if the command buffer is full.
 * NOTE(excerpt): the slice/level parameters, 'ret' declaration and
 * return statement are missing from this excerpt.
 */
static enum pipe_error
readback_image_vgpu9(struct svga_context *svga,
                     struct svga_winsys_surface *surf,
ret = SVGA3D_ReadbackGBImage(svga->swc, surf, slice, level);
if (ret != PIPE_OK) {
   /* Command buffer likely full: flush and retry once */
   svga_context_flush(svga, NULL);
   ret = SVGA3D_ReadbackGBImage(svga->swc, surf, slice, level);
/* Issue a VGPU10 subresource readback.  The subresource index is
 * slice * numMipLevels + level.  Flushes and retries once on failure.
 * NOTE(excerpt): the slice/level parameters, 'ret' declaration and
 * return statement are missing from this excerpt.
 */
static enum pipe_error
readback_image_vgpu10(struct svga_context *svga,
                      struct svga_winsys_surface *surf,
                      unsigned numMipLevels)
unsigned subResource;
subResource = slice * numMipLevels + level;
ret = SVGA3D_vgpu10_ReadbackSubResource(svga->swc, surf, subResource);
if (ret != PIPE_OK) {
   /* Command buffer likely full: flush and retry once */
   svga_context_flush(svga, NULL);
   ret = SVGA3D_vgpu10_ReadbackSubResource(svga->swc, surf, subResource);
/* pipe_context::transfer_map for textures.  Two paths:
 *  - DMA path: stage through a winsys hardware buffer (st->hwbuf), or a
 *    malloc buffer (st->swbuf) split into bands when the hwbuf allocation
 *    fails; data moved with svga_transfer_dma().
 *  - Direct-map path (guest-backed objects): map the GB surface itself,
 *    performing a host readback first when need_tex_readback() says so.
 * NOTE(excerpt): many lines (declarations such as w/h/d/usage/level/
 * returnVal/map/retry, braces, error paths, some call arguments) are
 * missing from this excerpt.
 */
svga_texture_transfer_map(struct pipe_context *pipe,
                          struct pipe_resource *texture,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer)
struct svga_context *svga = svga_context(pipe);
struct svga_screen *ss = svga_screen(pipe->screen);
struct svga_winsys_screen *sws = ss->sws;
struct svga_texture *tex = svga_texture(texture);
struct svga_transfer *st;
unsigned nblocksx, nblocksy;
/* Direct map is the default when GB objects exist but GB DMA does not */
boolean use_direct_map = svga_have_gb_objects(svga) &&
                         !svga_have_gb_dma(svga);
/* Timestamp for HUD map_buffer_time accounting (see end of function) */
int64_t begin = os_time_get();
/* We can't map texture storage directly unless we have GB objects */
if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
   if (svga_have_gb_objects(svga))
      use_direct_map = TRUE;
st = CALLOC_STRUCT(svga_transfer);
st->base.level = level;
st->base.usage = usage;
switch (tex->b.b.target) {
case PIPE_TEXTURE_CUBE:
   st->slice = st->base.box.z;
   st->base.box.z = 0;   /* so we don't apply double offsets below */
case PIPE_TEXTURE_2D_ARRAY:
case PIPE_TEXTURE_1D_ARRAY:
   st->slice = st->base.box.z;
   st->base.box.z = 0;   /* so we don't apply double offsets below */
   /* Force direct map for transfering multiple slices */
   if (st->base.box.depth > 1)
      use_direct_map = svga_have_gb_objects(svga);
if (use_direct_map) {
   /* we'll directly access the guest-backed surface */
   w = u_minify(texture->width0, level);
   h = u_minify(texture->height0, level);
   d = u_minify(texture->depth0, level);
/* we'll put the data into a tightly packed buffer */
nblocksx = util_format_get_nblocksx(texture->format, w);
nblocksy = util_format_get_nblocksy(texture->format, h);
pipe_resource_reference(&st->base.resource, texture);
st->base.stride = nblocksx*util_format_get_blocksize(texture->format);
st->base.layer_stride = st->base.stride * nblocksy;
if (usage & PIPE_TRANSFER_WRITE) {
   /* record texture upload for HUD */
   svga->hud.num_bytes_uploaded +=
      nblocksx * nblocksy * d * util_format_get_blocksize(texture->format);
if (!use_direct_map) {
   /* Use a DMA buffer */
   st->hw_nblocksy = nblocksy;
   st->hwbuf = svga_winsys_buffer_create(svga, 1, 0,
                                         st->hw_nblocksy * st->base.stride * d);
   /* Halve the band height until an allocation succeeds */
   while(!st->hwbuf && (st->hw_nblocksy /= 2)) {
      st->hwbuf = svga_winsys_buffer_create(svga, 1, 0,
                                            st->hw_nblocksy * st->base.stride * d);
   if (st->hw_nblocksy < nblocksy) {
      /* We couldn't allocate a hardware buffer big enough for the transfer,
       * so allocate regular malloc memory instead */
      debug_printf("%s: failed to allocate %u KB of DMA, "
                   "splitting into %u x %u KB DMA transfers\n",
                   (nblocksy*st->base.stride + 1023)/1024,
                   (nblocksy + st->hw_nblocksy - 1)/st->hw_nblocksy,
                   (st->hw_nblocksy*st->base.stride + 1023)/1024);
      st->swbuf = MALLOC(nblocksy * st->base.stride * d);
      sws->buffer_destroy(sws, st->hwbuf);
   /* Reading: pull current contents from host VRAM into the buffer */
   if (usage & PIPE_TRANSFER_READ) {
      SVGA3dSurfaceDMAFlags flags;
      memset(&flags, 0, sizeof flags);
      svga_transfer_dma(svga, st, SVGA3D_READ_HOST_VRAM, flags);
/* --- direct-map (guest-backed) path --- */
struct pipe_transfer *transfer = &st->base;
struct svga_winsys_surface *surf = tex->handle;
/* If this is the first time mapping to the surface in this
 * command buffer, clear the dirty masks of this surface.
 */
if (sws->surface_is_flushed(sws, surf)) {
   svga_clear_texture_dirty(tex);
if (need_tex_readback(transfer)) {
   svga_surfaces_flush(svga);
   if (svga_have_vgpu10(svga)) {
      ret = readback_image_vgpu10(svga, surf, st->slice, transfer->level,
                                  tex->b.b.last_level + 1);
      ret = readback_image_vgpu9(svga, surf, st->slice, transfer->level);
   svga->hud.num_readbacks++;
   assert(ret == PIPE_OK);
   svga_context_flush(svga, NULL);
   /*
    * Note: if PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE were specified
    * we could potentially clear the flag for all faces/layers/mips.
    */
   svga_clear_texture_rendered_to(tex, st->slice, transfer->level);
/* No readback needed: must be a write-only map */
assert(transfer->usage & PIPE_TRANSFER_WRITE);
if ((transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED) == 0) {
   if (svga_is_texture_dirty(tex, st->slice, transfer->level)) {
      /*
       * do a surface flush if the subresource has been modified
       * in this command buffer.
       */
      svga_surfaces_flush(svga);
      if (!sws->surface_is_flushed(sws, surf)) {
         svga->hud.surface_write_flushes++;
         svga_context_flush(svga, NULL);
if (transfer->usage & PIPE_TRANSFER_WRITE) {
   /* mark this texture level as dirty */
   svga_set_texture_dirty(tex, st->slice, transfer->level);
st->use_direct_map = use_direct_map;
*ptransfer = &st->base;
/* Pick the pointer to hand back to the caller */
returnVal = st->swbuf;
else if (!st->use_direct_map) {
   returnVal = sws->buffer_map(sws, st->hwbuf, usage);
/* Direct map: map the winsys surface and offset into the right image */
SVGA3dSize baseLevelSize;
struct svga_texture *tex = svga_texture(texture);
struct svga_winsys_surface *surf = tex->handle;
unsigned offset, mip_width, mip_height;
unsigned xoffset = st->base.box.x;
unsigned yoffset = st->base.box.y;
unsigned zoffset = st->base.box.z;
map = svga->swc->surface_map(svga->swc, surf, usage, &retry);
if (map == NULL && retry) {
   /*
    * At this point, the svga_surfaces_flush() should already have
    * called in svga_texture_get_transfer().
    */
   svga_context_flush(svga, NULL);
   map = svga->swc->surface_map(svga->swc, surf, usage, &retry);
/*
 * Make sure we return NULL if the map fails
 */
/*
 * Compute the offset to the specific texture slice in the buffer.
 */
baseLevelSize.width = tex->b.b.width0;
baseLevelSize.height = tex->b.b.height0;
baseLevelSize.depth = tex->b.b.depth0;
if ((tex->b.b.target == PIPE_TEXTURE_1D_ARRAY) ||
    (tex->b.b.target == PIPE_TEXTURE_2D_ARRAY)) {
   /* Array textures: layer stride is the size of one whole mip chain */
   st->base.layer_stride =
      svga3dsurface_get_image_offset(tex->key.format, baseLevelSize,
                                     tex->b.b.last_level + 1, 1, 0);
offset = svga3dsurface_get_image_offset(tex->key.format, baseLevelSize,
                                        tex->b.b.last_level + 1, /* numMips */
mip_width = u_minify(tex->b.b.width0, level);
mip_height = u_minify(tex->b.b.height0, level);
offset += svga3dsurface_get_pixel_offset(tex->key.format,
                                         mip_width, mip_height,
                                         xoffset, yoffset, zoffset);
returnVal = (void *) (map + offset);
/* HUD accounting */
svga->hud.map_buffer_time += (os_time_get() - begin);
svga->hud.num_resources_mapped++;
/*
 * Unmap a GB texture surface.
 * If the winsys reports the surface needs rebinding, re-issue
 * SVGA3D_BindGBSurface, flushing and retrying once on failure.
 * NOTE(excerpt): the return-type line, the 'rebind'/'ret' declarations
 * and the "if (rebind)" guard are missing from this excerpt.
 */
svga_texture_surface_unmap(struct svga_context *svga,
                           struct pipe_transfer *transfer)
struct svga_winsys_surface *surf = svga_texture(transfer->resource)->handle;
struct svga_winsys_context *swc = svga->swc;
swc->surface_unmap(swc, surf, &rebind);
ret = SVGA3D_BindGBSurface(swc, surf);
if (ret != PIPE_OK) {
   /* flush and retry */
   svga_context_flush(svga, NULL);
   ret = SVGA3D_BindGBSurface(swc, surf);
   assert(ret == PIPE_OK);
/* Issue a VGPU9 guest-backed-image update for one box/slice/level,
 * flushing and retrying once if the command buffer is full.
 * NOTE(excerpt): the slice/level parameters, 'ret' declaration and
 * return statement are missing from this excerpt.
 */
static enum pipe_error
update_image_vgpu9(struct svga_context *svga,
                   struct svga_winsys_surface *surf,
                   const SVGA3dBox *box,
ret = SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level);
if (ret != PIPE_OK) {
   /* Command buffer likely full: flush and retry once */
   svga_context_flush(svga, NULL);
   ret = SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level);
/* Issue a VGPU10 subresource update for the given box.  The subresource
 * index is slice * numMipLevels + level.  Flushes and retries once on
 * failure.
 * NOTE(excerpt): the slice/level parameters, 'ret' declaration and
 * return statement are missing from this excerpt.
 */
static enum pipe_error
update_image_vgpu10(struct svga_context *svga,
                    struct svga_winsys_surface *surf,
                    const SVGA3dBox *box,
                    unsigned numMipLevels)
unsigned subResource;
subResource = slice * numMipLevels + level;
ret = SVGA3D_vgpu10_UpdateSubResource(svga->swc, surf, box, subResource);
if (ret != PIPE_OK) {
   /* Command buffer likely full: flush and retry once */
   svga_context_flush(svga, NULL);
   ret = SVGA3D_vgpu10_UpdateSubResource(svga->swc, surf, box, subResource);
/* pipe_context::transfer_unmap for textures.  Unmaps the staging buffer
 * or GB surface, then pushes written data to the host: via SurfaceDMA on
 * the non-direct-map path, or via UpdateGBImage/UpdateSubResource on the
 * direct-map (guest-backed) path.  Finally ages sampler views and frees
 * the transfer's resources.
 * NOTE(excerpt): several lines (the 'box'/'i'/'ret' declarations, switch
 * case bodies, some braces and the else of the cube-map branch) are
 * missing from this excerpt.
 */
svga_texture_transfer_unmap(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
struct svga_context *svga = svga_context(pipe);
struct svga_screen *ss = svga_screen(pipe->screen);
struct svga_winsys_screen *sws = ss->sws;
struct svga_transfer *st = svga_transfer(transfer);
struct svga_texture *tex = svga_texture(transfer->resource);
if (st->use_direct_map) {
   svga_texture_surface_unmap(svga, transfer);
   sws->buffer_unmap(sws, st->hwbuf);
if (!st->use_direct_map && (st->base.usage & PIPE_TRANSFER_WRITE)) {
   /* Use DMA to transfer texture data */
   SVGA3dSurfaceDMAFlags flags;
   memset(&flags, 0, sizeof flags);
   if (transfer->usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
      flags.discard = TRUE;
   if (transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
      flags.unsynchronized = TRUE;
   svga_transfer_dma(svga, st, SVGA3D_WRITE_HOST_VRAM, flags);
} else if (transfer->usage & PIPE_TRANSFER_WRITE) {
   /* Direct-map write: tell the host which region changed */
   struct svga_winsys_surface *surf =
      svga_texture(transfer->resource)->handle;
   unsigned nlayers = 1;
   assert(svga_have_gb_objects(svga));
   /* update the effected region */
   box.x = transfer->box.x;
   box.y = transfer->box.y;
   box.w = transfer->box.width;
   box.h = transfer->box.height;
   box.d = transfer->box.depth;
   switch (tex->b.b.target) {
   case PIPE_TEXTURE_CUBE:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_1D_ARRAY:
      box.z = transfer->box.z;
   debug_printf("%s %d, %d, %d %d x %d x %d\n",
                box.w, box.h, box.d);
   if (svga_have_vgpu10(svga)) {
      /* One update per layer on VGPU10 */
      for (i = 0; i < nlayers; i++) {
         ret = update_image_vgpu10(svga, surf, &box,
                                   st->slice + i, transfer->level,
                                   tex->b.b.last_level + 1);
         assert(ret == PIPE_OK);
      assert(nlayers == 1);
      ret = update_image_vgpu9(svga, surf, &box, st->slice, transfer->level);
      assert(ret == PIPE_OK);
   svga->hud.num_resource_updates++;
/* Invalidate derived sampler views and record the level as defined */
ss->texture_timestamp++;
svga_age_texture_view(tex, transfer->level);
if (transfer->resource->target == PIPE_TEXTURE_CUBE)
   svga_define_texture_level(tex, st->slice, transfer->level);
   svga_define_texture_level(tex, 0, transfer->level);
pipe_resource_reference(&st->base.resource, NULL);
if (!st->use_direct_map) {
   sws->buffer_destroy(sws, st->hwbuf);
765 * Does format store depth values?
767 static inline boolean
768 format_has_depth(enum pipe_format format
)
770 const struct util_format_description
*desc
= util_format_description(format
);
771 return util_format_has_depth(desc
);
/* u_resource_vtbl hookup: routes the generic resource entry points to the
 * texture implementations in this file, with u_default_* for the rest.
 * NOTE(excerpt): the opening/closing braces of the initializer are
 * missing from this excerpt.
 */
struct u_resource_vtbl svga_texture_vtbl =
   svga_texture_get_handle,             /* get_handle */
   svga_texture_destroy,                /* resource_destroy */
   svga_texture_transfer_map,           /* transfer_map */
   u_default_transfer_flush_region,     /* transfer_flush_region */
   svga_texture_transfer_unmap,         /* transfer_unmap */
   u_default_transfer_inline_write      /* transfer_inline_write */
/* pipe_screen::resource_create for textures: allocate the svga_texture
 * wrapper and its per-image tracking arrays, translate the gallium
 * template into an svga_winsys surface key (dimensions, flags, format,
 * mip count, cachability), then create the host surface.
 * NOTE(excerpt): many lines (braces, break statements, error-path
 * returns, some flag handling and the final return) are missing from
 * this excerpt.
 */
struct pipe_resource *
svga_texture_create(struct pipe_screen *screen,
                    const struct pipe_resource *template)
struct svga_screen *svgascreen = svga_screen(screen);
struct svga_texture *tex;
unsigned bindings = template->bind;
assert(template->last_level < SVGA_MAX_TEXTURE_LEVELS);
if (template->last_level >= SVGA_MAX_TEXTURE_LEVELS) {
tex = CALLOC_STRUCT(svga_texture);
/* Per-image tracking arrays, one entry per depth-slice/layer */
tex->defined = CALLOC(template->depth0 * template->array_size,
                      sizeof(tex->defined[0]));
tex->rendered_to = CALLOC(template->depth0 * template->array_size,
                          sizeof(tex->rendered_to[0]));
if (!tex->rendered_to) {
tex->dirty = CALLOC(template->depth0 * template->array_size,
                    sizeof(tex->dirty[0]));
/* Initialize the pipe_resource base */
tex->b.b = *template;
tex->b.vtbl = &svga_texture_vtbl;
pipe_reference_init(&tex->b.b.reference, 1);
tex->b.b.screen = screen;
/* Fill in the surface key from the template */
tex->key.size.width = template->width0;
tex->key.size.height = template->height0;
tex->key.size.depth = template->depth0;
tex->key.arraySize = 1;
tex->key.numFaces = 1;
tex->key.sampleCount = template->nr_samples;
if (template->nr_samples > 1) {
   tex->key.flags |= SVGA3D_SURFACE_MASKABLE_ANTIALIAS;
if (svgascreen->sws->have_vgpu10) {
   /* VGPU10: map gallium targets onto 1D/array/volume/cubemap flags */
   switch (template->target) {
   case PIPE_TEXTURE_1D:
      tex->key.flags |= SVGA3D_SURFACE_1D;
   case PIPE_TEXTURE_1D_ARRAY:
      tex->key.flags |= SVGA3D_SURFACE_1D;
   case PIPE_TEXTURE_2D_ARRAY:
      tex->key.flags |= SVGA3D_SURFACE_ARRAY;
      tex->key.arraySize = template->array_size;
   case PIPE_TEXTURE_3D:
      tex->key.flags |= SVGA3D_SURFACE_VOLUME;
   case PIPE_TEXTURE_CUBE:
      tex->key.flags |= (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_ARRAY);
      tex->key.numFaces = 6;
/* Pre-VGPU10 target handling */
switch (template->target) {
case PIPE_TEXTURE_3D:
   tex->key.flags |= SVGA3D_SURFACE_VOLUME;
case PIPE_TEXTURE_CUBE:
   tex->key.flags |= SVGA3D_SURFACE_CUBEMAP;
   tex->key.numFaces = 6;
tex->key.cachable = 1;
if ((bindings & (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL)) &&
    !(bindings & PIPE_BIND_SAMPLER_VIEW)) {
   /* Also check if the format can be sampled from */
   if (screen->is_format_supported(screen, template->format,
                                   template->nr_samples,
                                   PIPE_BIND_SAMPLER_VIEW)) {
      bindings |= PIPE_BIND_SAMPLER_VIEW;
if (bindings & PIPE_BIND_SAMPLER_VIEW) {
   tex->key.flags |= SVGA3D_SURFACE_HINT_TEXTURE;
   tex->key.flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE;
   if (!(bindings & PIPE_BIND_RENDER_TARGET)) {
      /* Also check if the format is renderable */
      if (screen->is_format_supported(screen, template->format,
                                      template->nr_samples,
                                      PIPE_BIND_RENDER_TARGET)) {
         bindings |= PIPE_BIND_RENDER_TARGET;
/* Displayable/shared/scanout surfaces must bypass the surface cache */
if (bindings & PIPE_BIND_DISPLAY_TARGET) {
   tex->key.cachable = 0;
if (bindings & PIPE_BIND_SHARED) {
   tex->key.cachable = 0;
if (bindings & (PIPE_BIND_SCANOUT | PIPE_BIND_CURSOR)) {
   tex->key.scanout = 1;
   tex->key.cachable = 0;
/*
 * Note: Previously we never passed the
 * SVGA3D_SURFACE_HINT_RENDERTARGET hint. Mesa cannot
 * know beforehand whether a texture will be used as a rendertarget or not
 * and it always requests PIPE_BIND_RENDER_TARGET, therefore
 * passing the SVGA3D_SURFACE_HINT_RENDERTARGET here defeats its purpose.
 *
 * However, this was changed since other state trackers
 * (XA for example) uses it accurately and certain device versions
 * relies on it in certain situations to render correctly.
 */
if ((bindings & PIPE_BIND_RENDER_TARGET) &&
    !util_format_is_s3tc(template->format)) {
   tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
   tex->key.flags |= SVGA3D_SURFACE_BIND_RENDER_TARGET;
if (bindings & PIPE_BIND_DEPTH_STENCIL) {
   tex->key.flags |= SVGA3D_SURFACE_HINT_DEPTHSTENCIL;
   tex->key.flags |= SVGA3D_SURFACE_BIND_DEPTH_STENCIL;
tex->key.numMipLevels = template->last_level + 1;
tex->key.format = svga_translate_format(svgascreen, template->format,
if (tex->key.format == SVGA3D_FORMAT_INVALID) {
/* The actual allocation is done with a typeless format. Typeless
 * formats can be reinterpreted as other formats. For example,
 * SVGA3D_R8G8B8A8_UNORM_TYPELESS can be interpreted as
 * SVGA3D_R8G8B8A8_UNORM_SRGB or SVGA3D_R8G8B8A8_UNORM.
 * Do not use typeless formats for SHARED, DISPLAY_TARGET or SCANOUT
 */
if (svgascreen->sws->have_vgpu10
    && ((bindings & (PIPE_BIND_SHARED |
                     PIPE_BIND_DISPLAY_TARGET |
                     PIPE_BIND_SCANOUT)) == 0)) {
   SVGA3dSurfaceFormat typeless = svga_typeless_format(tex->key.format);
   debug_printf("Convert resource type %s -> %s (bind 0x%x)\n",
                svga_format_name(tex->key.format),
                svga_format_name(typeless),
   tex->key.format = typeless;
SVGA_DBG(DEBUG_DMA, "surface_create for texture\n", tex->handle);
tex->handle = svga_screen_surface_create(svgascreen, bindings,
                                         tex->b.b.usage, &tex->key);
SVGA_DBG(DEBUG_DMA, "  --> got sid %p (texture)\n", tex->handle);
debug_reference(&tex->b.b.reference,
                (debug_reference_descriptor)debug_describe_resource, 0);
/* HUD accounting */
tex->size = util_resource_size(template);
svgascreen->hud.total_resource_bytes += tex->size;
svgascreen->hud.num_resources++;
/* Error cleanup path */
if (tex->rendered_to)
   FREE(tex->rendered_to);
/* pipe_screen::resource_from_handle: wrap an imported winsys surface in
 * an svga_texture.  Only single-level 2D/RECT textures are accepted, and
 * the imported surface format must be compatible (modulo X/A channel and
 * depth-stencil aliasing) with the translated template format.
 * NOTE(excerpt): braces, error-path returns, the final return and some
 * cleanup lines are missing from this excerpt.
 */
struct pipe_resource *
svga_texture_from_handle(struct pipe_screen *screen,
                         const struct pipe_resource *template,
                         struct winsys_handle *whandle)
struct svga_winsys_screen *sws = svga_winsys_screen(screen);
struct svga_screen *ss = svga_screen(screen);
struct svga_winsys_surface *srf;
struct svga_texture *tex;
enum SVGA3dSurfaceFormat format = 0;
/* Only supports one type */
if ((template->target != PIPE_TEXTURE_2D &&
     template->target != PIPE_TEXTURE_RECT) ||
    template->last_level != 0 ||
    template->depth0 != 1) {
srf = sws->surface_from_handle(sws, whandle, &format);
if (svga_translate_format(svga_screen(screen), template->format,
                          template->bind) != format) {
   unsigned f1 = svga_translate_format(svga_screen(screen),
                                       template->format, template->bind);
   unsigned f2 = format;
   /* It's okay for XRGB and ARGB or depth with/out stencil to get mixed up.
    */
   if (f1 == SVGA3D_B8G8R8A8_UNORM)
      f1 = SVGA3D_A8R8G8B8;
   if (f1 == SVGA3D_B8G8R8X8_UNORM)
      f1 = SVGA3D_X8R8G8B8;
   if ( !( (f1 == f2) ||
           (f1 == SVGA3D_X8R8G8B8 && f2 == SVGA3D_A8R8G8B8) ||
           (f1 == SVGA3D_X8R8G8B8 && f2 == SVGA3D_B8G8R8X8_UNORM) ||
           (f1 == SVGA3D_A8R8G8B8 && f2 == SVGA3D_X8R8G8B8) ||
           (f1 == SVGA3D_A8R8G8B8 && f2 == SVGA3D_B8G8R8A8_UNORM) ||
           (f1 == SVGA3D_Z_D24X8 && f2 == SVGA3D_Z_D24S8) ||
           (f1 == SVGA3D_Z_DF24 && f2 == SVGA3D_Z_D24S8_INT) ) ) {
      debug_printf("%s wrong format %s != %s\n", __FUNCTION__,
                   svga_format_name(f1), svga_format_name(f2));
tex = CALLOC_STRUCT(svga_texture);
tex->defined = CALLOC(template->depth0 * template->array_size,
                      sizeof(tex->defined[0]));
if (!tex->defined) {
tex->b.b = *template;
tex->b.vtbl = &svga_texture_vtbl;
pipe_reference_init(&tex->b.b.reference, 1);
tex->b.b.screen = screen;
SVGA_DBG(DEBUG_DMA, "wrap surface sid %p\n", srf);
/* Imported surfaces must never enter the surface cache */
tex->key.cachable = 0;
tex->key.format = format;
tex->rendered_to = CALLOC(1, sizeof(tex->rendered_to[0]));
if (!tex->rendered_to)
tex->dirty = CALLOC(1, sizeof(tex->dirty[0]));
tex->imported = TRUE;
ss->hud.num_resources++;
/* Error cleanup path */
if (tex->rendered_to)
   FREE(tex->rendered_to);
/* Hardware mipmap generation via the VGPU10 GenMips command.  Creates a
 * temporary sampler view covering [base_level, last_level] x
 * [first_layer, last_layer], validates it, then issues GenMips (flushing
 * and retrying once on a full command buffer).
 * NOTE(excerpt): the return-type line, early "return FALSE" statements,
 * null checks and the final return are missing from this excerpt.
 */
svga_texture_generate_mipmap(struct pipe_context *pipe,
                             struct pipe_resource *pt,
                             enum pipe_format format,
                             unsigned base_level,
                             unsigned last_level,
                             unsigned first_layer,
                             unsigned last_layer)
struct pipe_sampler_view templ, *psv;
struct svga_pipe_sampler_view *sv;
struct svga_context *svga = svga_context(pipe);
struct svga_texture *tex = svga_texture(pt);
enum pipe_error ret;
assert(svga_have_vgpu10(svga));
/* Only support 2D texture for now */
if (pt->target != PIPE_TEXTURE_2D)
/* Fallback to the mipmap generation utility for those formats that
 * do not support hw generate mipmap
 */
if (!svga_format_support_gen_mips(format))
/* Make sure the texture surface was created with
 * SVGA3D_SURFACE_BIND_RENDER_TARGET
 */
if (!tex->handle || !(tex->key.flags & SVGA3D_SURFACE_BIND_RENDER_TARGET))
/* Build the sampler-view template for the mip range to generate */
templ.format = format;
templ.u.tex.first_layer = first_layer;
templ.u.tex.last_layer = last_layer;
templ.u.tex.first_level = base_level;
templ.u.tex.last_level = last_level;
psv = pipe->create_sampler_view(pipe, pt, &templ);
sv = svga_pipe_sampler_view(psv);
ret = svga_validate_pipe_sampler_view(svga, sv);
if (ret != PIPE_OK) {
   /* Command buffer likely full: flush and retry once */
   svga_context_flush(svga, NULL);
   ret = svga_validate_pipe_sampler_view(svga, sv);
   assert(ret == PIPE_OK);
ret = SVGA3D_vgpu10_GenMips(svga->swc, sv->id, tex->handle);
if (ret != PIPE_OK) {
   svga_context_flush(svga, NULL);
   ret = SVGA3D_vgpu10_GenMips(svga->swc, sv->id, tex->handle);
pipe_sampler_view_reference(&psv, NULL);
/* HUD accounting */
svga->hud.num_generate_mipmap++;