svga: add a few more resource updates HUD query
src/gallium/drivers/svga/svga_resource_texture.c
/**********************************************************
 * Copyright 2008-2009 VMware, Inc. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy,
 * modify, merge, publish, distribute, sublicense, and/or sell copies
 * of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 **********************************************************/

#include "svga3d_reg.h"
#include "svga3d_surfacedefs.h"

#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "os/os_thread.h"
#include "os/os_time.h"
#include "util/u_format.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "util/u_resource.h"

#include "svga_cmd.h"
#include "svga_format.h"
#include "svga_screen.h"
#include "svga_context.h"
#include "svga_resource_texture.h"
#include "svga_resource_buffer.h"
#include "svga_sampler_view.h"
#include "svga_winsys.h"
#include "svga_debug.h"


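/**
 * Emit a SurfaceDMA command to copy one horizontal band of a transfer
 * between the host surface and the transfer's hardware buffer.  The band
 * is described by a destination y offset, a height and a source y offset.
 */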
static void
svga_transfer_dma_band(struct svga_context *svga,
                       struct svga_transfer *st,
                       SVGA3dTransferType transfer,
                       unsigned y, unsigned h, unsigned srcy,
                       SVGA3dSurfaceDMAFlags flags)
{
   struct svga_texture *texture = svga_texture(st->base.resource);
   SVGA3dCopyBox box;
   enum pipe_error ret;

   assert(!st->use_direct_map);

   box.x = st->base.box.x;
   box.y = y;
   box.z = st->base.box.z;
   box.w = st->base.box.width;
   box.h = h;
   box.d = 1;
   box.srcx = 0;
   box.srcy = srcy;
   box.srcz = 0;

   SVGA_DBG(DEBUG_DMA, "dma %s sid %p, face %u, (%u, %u, %u) - "
            "(%u, %u, %u), %ubpp\n",
            transfer == SVGA3D_WRITE_HOST_VRAM ? "to" : "from",
            texture->handle,
            st->slice,
            st->base.box.x,
            y,
            box.z,
            st->base.box.x + st->base.box.width,
            y + h,
            box.z + 1,
            util_format_get_blocksize(texture->b.b.format) * 8 /
            (util_format_get_blockwidth(texture->b.b.format)
             * util_format_get_blockheight(texture->b.b.format)));

   ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
      assert(ret == PIPE_OK);
   }
}


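/**
 * Perform the DMA transfer for a whole texture transfer.  When the data is
 * staged in a software buffer, the transfer is split into bands that fit
 * into the hardware buffer, flushing (and fencing for readbacks) between
 * bands.
 */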
static void
svga_transfer_dma(struct svga_context *svga,
                  struct svga_transfer *st,
                  SVGA3dTransferType transfer,
                  SVGA3dSurfaceDMAFlags flags)
{
   struct svga_texture *texture = svga_texture(st->base.resource);
   struct svga_screen *screen = svga_screen(texture->b.b.screen);
   struct svga_winsys_screen *sws = screen->sws;
   struct pipe_fence_handle *fence = NULL;

   assert(!st->use_direct_map);

   if (transfer == SVGA3D_READ_HOST_VRAM) {
      SVGA_DBG(DEBUG_PERF, "%s: readback transfer\n", __FUNCTION__);
   }

   /* Ensure any pending operations on host surfaces are queued on the command
    * buffer first.
    */
   svga_surfaces_flush(svga);

   if (!st->swbuf) {
      /* Do the DMA transfer in a single go */
      svga_transfer_dma_band(svga, st, transfer,
                             st->base.box.y, st->base.box.height, 0,
                             flags);

      if (transfer == SVGA3D_READ_HOST_VRAM) {
         svga_context_flush(svga, &fence);
         sws->fence_finish(sws, fence, 0);
         sws->fence_reference(sws, &fence, NULL);
      }
   }
   else {
      int y, h, srcy;
      unsigned blockheight =
         util_format_get_blockheight(st->base.resource->format);

      h = st->hw_nblocksy * blockheight;
      srcy = 0;

      for (y = 0; y < st->base.box.height; y += h) {
         unsigned offset, length;
         void *hw, *sw;

         if (y + h > st->base.box.height)
            h = st->base.box.height - y;

         /* Transfer band must be aligned to pixel block boundaries */
         assert(y % blockheight == 0);
         assert(h % blockheight == 0);

         offset = y * st->base.stride / blockheight;
         length = h * st->base.stride / blockheight;

         sw = (uint8_t *) st->swbuf + offset;

         if (transfer == SVGA3D_WRITE_HOST_VRAM) {
            unsigned usage = PIPE_TRANSFER_WRITE;

            /* Wait for the previous DMAs to complete */
            /* TODO: keep one DMA (at half the size) in the background */
            if (y) {
               svga_context_flush(svga, NULL);
               usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
            }

            hw = sws->buffer_map(sws, st->hwbuf, usage);
            assert(hw);
            if (hw) {
               memcpy(hw, sw, length);
               sws->buffer_unmap(sws, st->hwbuf);
            }
         }

         svga_transfer_dma_band(svga, st, transfer, y, h, srcy, flags);

         /*
          * Prevent the texture contents from being discarded on the next
          * band upload.
          */
         flags.discard = FALSE;

         if (transfer == SVGA3D_READ_HOST_VRAM) {
            svga_context_flush(svga, &fence);
            sws->fence_finish(sws, fence, 0);

            hw = sws->buffer_map(sws, st->hwbuf, PIPE_TRANSFER_READ);
            assert(hw);
            if (hw) {
               memcpy(sw, hw, length);
               sws->buffer_unmap(sws, st->hwbuf);
            }
         }
      }
   }
}


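/**
 * Return a winsys handle for the texture's underlying surface so it can be
 * shared across processes/APIs.  The surface must not be cachable since it
 * is now externally referenced.
 */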
static boolean
svga_texture_get_handle(struct pipe_screen *screen,
                        struct pipe_resource *texture,
                        struct winsys_handle *whandle)
{
   struct svga_winsys_screen *sws = svga_winsys_screen(texture->screen);
   unsigned stride;

   assert(svga_texture(texture)->key.cachable == 0);
   svga_texture(texture)->key.cachable = 0;

   stride = util_format_get_nblocksx(texture->format, texture->width0) *
            util_format_get_blocksize(texture->format);

   return sws->surface_get_handle(sws, svga_texture(texture)->handle,
                                  stride, whandle);
}


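/**
 * Destroy a texture: release the cached sampler view, destroy the host
 * surface, free the per-texture bookkeeping arrays and update the HUD
 * resource counters.
 */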
static void
svga_texture_destroy(struct pipe_screen *screen,
                     struct pipe_resource *pt)
{
   struct svga_screen *ss = svga_screen(screen);
   struct svga_texture *tex = svga_texture(pt);

   ss->texture_timestamp++;

   svga_sampler_view_reference(&tex->cached_view, NULL);

   /*
     DBG("%s deleting %p\n", __FUNCTION__, (void *) tex);
   */
   SVGA_DBG(DEBUG_DMA, "unref sid %p (texture)\n", tex->handle);
   svga_screen_surface_destroy(ss, &tex->key, &tex->handle);

   ss->hud.total_resource_bytes -= tex->size;

   FREE(tex->defined);
   FREE(tex->rendered_to);
   FREE(tex);

   assert(ss->hud.num_resources > 0);
   if (ss->hud.num_resources > 0)
      ss->hud.num_resources--;
}


/**
 * Determine if we need to read back a texture image before mapping it.
 */
static boolean
need_tex_readback(struct pipe_transfer *transfer)
{
   struct svga_texture *t = svga_texture(transfer->resource);

   if (transfer->usage & PIPE_TRANSFER_READ)
      return TRUE;

   if ((transfer->usage & PIPE_TRANSFER_WRITE) &&
       ((transfer->usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) == 0)) {
      unsigned face;

      if (transfer->resource->target == PIPE_TEXTURE_CUBE) {
         assert(transfer->box.depth == 1);
         face = transfer->box.z;
      }
      else {
         face = 0;
      }
      if (svga_was_texture_rendered_to(t, face, transfer->level)) {
         return TRUE;
      }
   }

   return FALSE;
}


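/**
 * Issue a ReadbackGBImage command to read the given slice/mipmap level of
 * a guest-backed surface back into guest memory (VGPU9 path).  Flushes and
 * retries once if the command cannot be queued.
 */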
static enum pipe_error
readback_image_vgpu9(struct svga_context *svga,
                     struct svga_winsys_surface *surf,
                     unsigned slice,
                     unsigned level)
{
   enum pipe_error ret;

   ret = SVGA3D_ReadbackGBImage(svga->swc, surf, slice, level);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_ReadbackGBImage(svga->swc, surf, slice, level);
   }
   return ret;
}


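/**
 * Issue a ReadbackSubResource command for the given slice/mipmap level
 * (VGPU10 path).  The subresource index is computed from the slice index
 * and the number of mipmap levels.  Flushes and retries once on failure.
 */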
static enum pipe_error
readback_image_vgpu10(struct svga_context *svga,
                      struct svga_winsys_surface *surf,
                      unsigned slice,
                      unsigned level,
                      unsigned numMipLevels)
{
   enum pipe_error ret;
   unsigned subResource;

   subResource = slice * numMipLevels + level;
   ret = SVGA3D_vgpu10_ReadbackSubResource(svga->swc, surf, subResource);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_ReadbackSubResource(svga->swc, surf, subResource);
   }
   return ret;
}


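/**
 * Map a region of a texture image for CPU access.  With guest-backed
 * objects and no DMA support the surface memory is mapped directly;
 * otherwise the data is staged through a DMA buffer, or a malloc'd
 * buffer when a large enough DMA buffer cannot be allocated.
 */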
static void *
svga_texture_transfer_map(struct pipe_context *pipe,
                          struct pipe_resource *texture,
                          unsigned level,
                          unsigned usage,
                          const struct pipe_box *box,
                          struct pipe_transfer **ptransfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_texture *tex = svga_texture(texture);
   struct svga_transfer *st;
   unsigned nblocksx, nblocksy;
   boolean use_direct_map = svga_have_gb_objects(svga) &&
                            !svga_have_gb_dma(svga);
   unsigned d;
   void *returnVal;
   int64_t begin = os_time_get();

   /* We can't map texture storage directly unless we have GB objects */
   if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
      if (svga_have_gb_objects(svga))
         use_direct_map = TRUE;
      else
         return NULL;
   }

   st = CALLOC_STRUCT(svga_transfer);
   if (!st)
      return NULL;

   {
      unsigned w, h;
      if (use_direct_map) {
         /* we'll directly access the guest-backed surface */
         w = u_minify(texture->width0, level);
         h = u_minify(texture->height0, level);
         d = u_minify(texture->depth0, level);
      }
      else {
         /* we'll put the data into a tightly packed buffer */
         w = box->width;
         h = box->height;
         d = box->depth;
      }
      nblocksx = util_format_get_nblocksx(texture->format, w);
      nblocksy = util_format_get_nblocksy(texture->format, h);
   }

   pipe_resource_reference(&st->base.resource, texture);

   st->base.level = level;
   st->base.usage = usage;
   st->base.box = *box;
   st->base.stride = nblocksx * util_format_get_blocksize(texture->format);
   st->base.layer_stride = st->base.stride * nblocksy;

   switch (tex->b.b.target) {
   case PIPE_TEXTURE_CUBE:
   case PIPE_TEXTURE_2D_ARRAY:
   case PIPE_TEXTURE_1D_ARRAY:
      st->slice = st->base.box.z;
      st->base.box.z = 0;   /* so we don't apply double offsets below */
      break;
   default:
      st->slice = 0;
      break;
   }

   if (usage & PIPE_TRANSFER_WRITE) {
      /* record texture upload for HUD */
      svga->hud.num_bytes_uploaded +=
         nblocksx * nblocksy * d * util_format_get_blocksize(texture->format);
   }

   if (!use_direct_map) {
      /* Use a DMA buffer */
      st->hw_nblocksy = nblocksy;

      st->hwbuf = svga_winsys_buffer_create(svga, 1, 0,
                                            st->hw_nblocksy * st->base.stride * d);
      while (!st->hwbuf && (st->hw_nblocksy /= 2)) {
         st->hwbuf = svga_winsys_buffer_create(svga, 1, 0,
                                               st->hw_nblocksy * st->base.stride * d);
      }

      if (!st->hwbuf) {
         FREE(st);
         return NULL;
      }

      if (st->hw_nblocksy < nblocksy) {
         /* We couldn't allocate a hardware buffer big enough for the transfer,
          * so allocate regular malloc memory instead */
         if (0) {
            debug_printf("%s: failed to allocate %u KB of DMA, "
                         "splitting into %u x %u KB DMA transfers\n",
                         __FUNCTION__,
                         (nblocksy * st->base.stride + 1023) / 1024,
                         (nblocksy + st->hw_nblocksy - 1) / st->hw_nblocksy,
                         (st->hw_nblocksy * st->base.stride + 1023) / 1024);
         }

         st->swbuf = MALLOC(nblocksy * st->base.stride * d);
         if (!st->swbuf) {
            sws->buffer_destroy(sws, st->hwbuf);
            FREE(st);
            return NULL;
         }
      }

      if (usage & PIPE_TRANSFER_READ) {
         SVGA3dSurfaceDMAFlags flags;
         memset(&flags, 0, sizeof flags);
         svga_transfer_dma(svga, st, SVGA3D_READ_HOST_VRAM, flags);
      }
   } else {
      struct pipe_transfer *transfer = &st->base;
      struct svga_winsys_surface *surf = tex->handle;

      if (!surf) {
         FREE(st);
         return NULL;
      }

      if (need_tex_readback(transfer)) {
         enum pipe_error ret;

         svga_surfaces_flush(svga);

         if (svga_have_vgpu10(svga)) {
            ret = readback_image_vgpu10(svga, surf, st->slice, transfer->level,
                                        tex->b.b.last_level + 1);
         } else {
            ret = readback_image_vgpu9(svga, surf, st->slice, transfer->level);
         }

         svga->hud.num_readbacks++;

         assert(ret == PIPE_OK);
         (void) ret;

         svga_context_flush(svga, NULL);

         /*
          * Note: if PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE were specified
          * we could potentially clear the flag for all faces/layers/mips.
          */
         svga_clear_texture_rendered_to(tex, st->slice, transfer->level);
      }
      else {
         assert(transfer->usage & PIPE_TRANSFER_WRITE);
         if ((transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED) == 0) {
            svga_surfaces_flush(svga);
            if (!sws->surface_is_flushed(sws, surf)) {
               svga->hud.surface_write_flushes++;
               svga_context_flush(svga, NULL);
            }
         }
      }
   }

   st->use_direct_map = use_direct_map;

   *ptransfer = &st->base;

   /*
    * Begin mapping code
    */
   if (st->swbuf) {
      returnVal = st->swbuf;
   }
   else if (!st->use_direct_map) {
      returnVal = sws->buffer_map(sws, st->hwbuf, usage);
   }
   else {
      SVGA3dSize baseLevelSize;
      struct svga_texture *tex = svga_texture(texture);
      struct svga_winsys_surface *surf = tex->handle;
      uint8_t *map;
      boolean retry;
      unsigned offset, mip_width, mip_height;
      unsigned xoffset = st->base.box.x;
      unsigned yoffset = st->base.box.y;
      unsigned zoffset = st->base.box.z;

      map = svga->swc->surface_map(svga->swc, surf, usage, &retry);
      if (map == NULL && retry) {
         /*
          * At this point, svga_surfaces_flush() should already have been
          * called in svga_texture_get_transfer().
          */
         svga_context_flush(svga, NULL);
         map = svga->swc->surface_map(svga->swc, surf, usage, &retry);
      }

      /*
       * Make sure we return NULL if the map fails
       */
      if (!map) {
         FREE(st);
         return map;
      }

      /*
       * Compute the offset to the specific texture slice in the buffer.
       */
      baseLevelSize.width = tex->b.b.width0;
      baseLevelSize.height = tex->b.b.height0;
      baseLevelSize.depth = tex->b.b.depth0;

      offset = svga3dsurface_get_image_offset(tex->key.format, baseLevelSize,
                                              tex->b.b.last_level + 1, /* numMips */
                                              st->slice, level);
      if (level > 0) {
         assert(offset > 0);
      }

      mip_width = u_minify(tex->b.b.width0, level);
      mip_height = u_minify(tex->b.b.height0, level);

      offset += svga3dsurface_get_pixel_offset(tex->key.format,
                                               mip_width, mip_height,
                                               xoffset, yoffset, zoffset);
      returnVal = (void *) (map + offset);
   }

   svga->hud.map_buffer_time += (os_time_get() - begin);
   svga->hud.num_resources_mapped++;

   return returnVal;
}


/**
 * Unmap a GB texture surface.
 */
static void
svga_texture_surface_unmap(struct svga_context *svga,
                           struct pipe_transfer *transfer)
{
   struct svga_winsys_surface *surf = svga_texture(transfer->resource)->handle;
   struct svga_winsys_context *swc = svga->swc;
   boolean rebind;

   assert(surf);

   swc->surface_unmap(swc, surf, &rebind);
   if (rebind) {
      enum pipe_error ret;
      ret = SVGA3D_BindGBSurface(swc, surf);
      if (ret != PIPE_OK) {
         /* flush and retry */
         svga_context_flush(svga, NULL);
         ret = SVGA3D_BindGBSurface(swc, surf);
         assert(ret == PIPE_OK);
      }
   }
}


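/**
 * Issue an UpdateGBImage command to copy the given box of a slice/mipmap
 * level from guest memory to the host surface (VGPU9 path).  Flushes and
 * retries once if the command cannot be queued.
 */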
static enum pipe_error
update_image_vgpu9(struct svga_context *svga,
                   struct svga_winsys_surface *surf,
                   const SVGA3dBox *box,
                   unsigned slice,
                   unsigned level)
{
   enum pipe_error ret;

   ret = SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level);
   }
   return ret;
}


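/**
 * Issue an UpdateSubResource command for the given box of a slice/mipmap
 * level (VGPU10 path).  The subresource index is computed from the slice
 * index and the number of mipmap levels.  Flushes and retries once on
 * failure.
 */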
static enum pipe_error
update_image_vgpu10(struct svga_context *svga,
                    struct svga_winsys_surface *surf,
                    const SVGA3dBox *box,
                    unsigned slice,
                    unsigned level,
                    unsigned numMipLevels)
{
   enum pipe_error ret;
   unsigned subResource;

   subResource = slice * numMipLevels + level;
   ret = SVGA3D_vgpu10_UpdateSubResource(svga->swc, surf, box, subResource);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_UpdateSubResource(svga->swc, surf, box, subResource);
   }
   return ret;
}


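/**
 * Unmap a texture transfer.  For the DMA path, write data is uploaded to
 * the host surface here; for directly-mapped guest-backed surfaces, an
 * UpdateGBImage/UpdateSubResource command is issued for the written region.
 */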
static void
svga_texture_transfer_unmap(struct pipe_context *pipe,
                            struct pipe_transfer *transfer)
{
   struct svga_context *svga = svga_context(pipe);
   struct svga_screen *ss = svga_screen(pipe->screen);
   struct svga_winsys_screen *sws = ss->sws;
   struct svga_transfer *st = svga_transfer(transfer);
   struct svga_texture *tex = svga_texture(transfer->resource);

   if (!st->swbuf) {
      if (st->use_direct_map) {
         svga_texture_surface_unmap(svga, transfer);
      }
      else {
         sws->buffer_unmap(sws, st->hwbuf);
      }
   }

   if (!st->use_direct_map && (st->base.usage & PIPE_TRANSFER_WRITE)) {
      /* Use DMA to transfer texture data */
      SVGA3dSurfaceDMAFlags flags;

      memset(&flags, 0, sizeof flags);
      if (transfer->usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
         flags.discard = TRUE;
      }
      if (transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
         flags.unsynchronized = TRUE;
      }

      svga_transfer_dma(svga, st, SVGA3D_WRITE_HOST_VRAM, flags);
   } else if (transfer->usage & PIPE_TRANSFER_WRITE) {
      struct svga_winsys_surface *surf =
         svga_texture(transfer->resource)->handle;
      SVGA3dBox box;
      enum pipe_error ret;

      assert(svga_have_gb_objects(svga));

      /* update the affected region */
      box.x = transfer->box.x;
      box.y = transfer->box.y;
      switch (tex->b.b.target) {
      case PIPE_TEXTURE_CUBE:
      case PIPE_TEXTURE_2D_ARRAY:
         box.z = 0;
         break;
      case PIPE_TEXTURE_1D_ARRAY:
         box.y = box.z = 0;
         break;
      default:
         box.z = transfer->box.z;
         break;
      }
      box.w = transfer->box.width;
      box.h = transfer->box.height;
      box.d = transfer->box.depth;

      if (0)
         debug_printf("%s %d, %d, %d %d x %d x %d\n",
                      __FUNCTION__,
                      box.x, box.y, box.z,
                      box.w, box.h, box.d);

      if (svga_have_vgpu10(svga)) {
         ret = update_image_vgpu10(svga, surf, &box, st->slice, transfer->level,
                                   tex->b.b.last_level + 1);
      } else {
         ret = update_image_vgpu9(svga, surf, &box, st->slice, transfer->level);
      }

      svga->hud.num_resource_updates++;

      assert(ret == PIPE_OK);
      (void) ret;
   }

   ss->texture_timestamp++;
   svga_age_texture_view(tex, transfer->level);
   if (transfer->resource->target == PIPE_TEXTURE_CUBE)
      svga_define_texture_level(tex, st->slice, transfer->level);
   else
      svga_define_texture_level(tex, 0, transfer->level);

   pipe_resource_reference(&st->base.resource, NULL);

   FREE(st->swbuf);
   if (!st->use_direct_map) {
      sws->buffer_destroy(sws, st->hwbuf);
   }
   FREE(st);
}


/**
 * Does format store depth values?
 */
static inline boolean
format_has_depth(enum pipe_format format)
{
   const struct util_format_description *desc = util_format_description(format);
   return util_format_has_depth(desc);
}


struct u_resource_vtbl svga_texture_vtbl =
{
   svga_texture_get_handle,            /* get_handle */
   svga_texture_destroy,               /* resource_destroy */
   svga_texture_transfer_map,          /* transfer_map */
   u_default_transfer_flush_region,    /* transfer_flush_region */
   svga_texture_transfer_unmap,        /* transfer_unmap */
   u_default_transfer_inline_write     /* transfer_inline_write */
};


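/**
 * Create a new svga texture.  Translates the gallium resource template
 * into an SVGA3D surface key (size, flags, format, mip/array counts),
 * creates the host surface and updates the HUD resource counters.
 */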
struct pipe_resource *
svga_texture_create(struct pipe_screen *screen,
                    const struct pipe_resource *template)
{
   struct svga_screen *svgascreen = svga_screen(screen);
   struct svga_texture *tex;
   unsigned bindings = template->bind;

   assert(template->last_level < SVGA_MAX_TEXTURE_LEVELS);
   if (template->last_level >= SVGA_MAX_TEXTURE_LEVELS) {
      return NULL;
   }

   tex = CALLOC_STRUCT(svga_texture);
   if (!tex) {
      return NULL;
   }

   tex->defined = CALLOC(template->depth0 * template->array_size,
                         sizeof(tex->defined[0]));
   if (!tex->defined) {
      FREE(tex);
      return NULL;
   }

   tex->rendered_to = CALLOC(template->depth0 * template->array_size,
                             sizeof(tex->rendered_to[0]));
   if (!tex->rendered_to) {
      FREE(tex->defined);
      FREE(tex);
      return NULL;
   }

   tex->b.b = *template;
   tex->b.vtbl = &svga_texture_vtbl;
   pipe_reference_init(&tex->b.b.reference, 1);
   tex->b.b.screen = screen;

   tex->key.flags = 0;
   tex->key.size.width = template->width0;
   tex->key.size.height = template->height0;
   tex->key.size.depth = template->depth0;
   tex->key.arraySize = 1;
   tex->key.numFaces = 1;
   tex->key.sampleCount = template->nr_samples;

   if (template->nr_samples > 1) {
      tex->key.flags |= SVGA3D_SURFACE_MASKABLE_ANTIALIAS;
   }

   if (svgascreen->sws->have_vgpu10) {
      switch (template->target) {
      case PIPE_TEXTURE_1D:
         tex->key.flags |= SVGA3D_SURFACE_1D;
         break;
      case PIPE_TEXTURE_1D_ARRAY:
         tex->key.flags |= SVGA3D_SURFACE_1D;
         /* fall-through */
      case PIPE_TEXTURE_2D_ARRAY:
         tex->key.flags |= SVGA3D_SURFACE_ARRAY;
         tex->key.arraySize = template->array_size;
         break;
      case PIPE_TEXTURE_3D:
         tex->key.flags |= SVGA3D_SURFACE_VOLUME;
         break;
      case PIPE_TEXTURE_CUBE:
         tex->key.flags |= (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_ARRAY);
         tex->key.numFaces = 6;
         break;
      default:
         break;
      }
   }
   else {
      switch (template->target) {
      case PIPE_TEXTURE_3D:
         tex->key.flags |= SVGA3D_SURFACE_VOLUME;
         break;
      case PIPE_TEXTURE_CUBE:
         tex->key.flags |= SVGA3D_SURFACE_CUBEMAP;
         tex->key.numFaces = 6;
         break;
      default:
         break;
      }
   }

   tex->key.cachable = 1;

   if (bindings & PIPE_BIND_SAMPLER_VIEW) {
      tex->key.flags |= SVGA3D_SURFACE_HINT_TEXTURE;
      tex->key.flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE;

      if (!(bindings & PIPE_BIND_RENDER_TARGET)) {
         /* Also check if the format is renderable */
         if (screen->is_format_supported(screen, template->format,
                                         template->target,
                                         template->nr_samples,
                                         PIPE_BIND_RENDER_TARGET)) {
            bindings |= PIPE_BIND_RENDER_TARGET;
         }
      }
   }

   if (bindings & PIPE_BIND_DISPLAY_TARGET) {
      tex->key.cachable = 0;
   }

   if (bindings & PIPE_BIND_SHARED) {
      tex->key.cachable = 0;
   }

   if (bindings & (PIPE_BIND_SCANOUT | PIPE_BIND_CURSOR)) {
      tex->key.scanout = 1;
      tex->key.cachable = 0;
   }

   /*
    * Note: Previously we never passed the
    * SVGA3D_SURFACE_HINT_RENDERTARGET hint.  Mesa cannot
    * know beforehand whether a texture will be used as a rendertarget or not
    * and it always requests PIPE_BIND_RENDER_TARGET, therefore
    * passing the SVGA3D_SURFACE_HINT_RENDERTARGET here defeats its purpose.
    *
    * However, this was changed since other state trackers
    * (XA for example) use it accurately and certain device versions
    * rely on it in certain situations to render correctly.
    */
   if ((bindings & PIPE_BIND_RENDER_TARGET) &&
       !util_format_is_s3tc(template->format)) {
      tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
      tex->key.flags |= SVGA3D_SURFACE_BIND_RENDER_TARGET;
   }

   if (bindings & PIPE_BIND_DEPTH_STENCIL) {
      tex->key.flags |= SVGA3D_SURFACE_HINT_DEPTHSTENCIL;
      tex->key.flags |= SVGA3D_SURFACE_BIND_DEPTH_STENCIL;
   }

   tex->key.numMipLevels = template->last_level + 1;

   tex->key.format = svga_translate_format(svgascreen, template->format,
                                           bindings);
   if (tex->key.format == SVGA3D_FORMAT_INVALID) {
      FREE(tex->defined);
      FREE(tex->rendered_to);
      FREE(tex);
      return NULL;
   }

   /* Use typeless formats for sRGB and depth resources.  Typeless
    * formats can be reinterpreted as other formats.  For example,
    * SVGA3D_R8G8B8A8_UNORM_TYPELESS can be interpreted as
    * SVGA3D_R8G8B8A8_UNORM_SRGB or SVGA3D_R8G8B8A8_UNORM.
    */
   if (svgascreen->sws->have_vgpu10 &&
       (util_format_is_srgb(template->format) ||
        format_has_depth(template->format))) {
      SVGA3dSurfaceFormat typeless = svga_typeless_format(tex->key.format);
      if (0) {
         debug_printf("Convert resource type %s -> %s (bind 0x%x)\n",
                      svga_format_name(tex->key.format),
                      svga_format_name(typeless),
                      bindings);
      }
      tex->key.format = typeless;
   }

   SVGA_DBG(DEBUG_DMA, "surface_create for texture\n", tex->handle);
   tex->handle = svga_screen_surface_create(svgascreen, bindings,
                                            tex->b.b.usage, &tex->key);
   if (!tex->handle) {
      FREE(tex->defined);
      FREE(tex->rendered_to);
      FREE(tex);
      return NULL;
   }

   SVGA_DBG(DEBUG_DMA, " --> got sid %p (texture)\n", tex->handle);

   debug_reference(&tex->b.b.reference,
                   (debug_reference_descriptor)debug_describe_resource, 0);

   tex->size = util_resource_size(template);
   svgascreen->hud.total_resource_bytes += tex->size;
   svgascreen->hud.num_resources++;

   return &tex->b.b;
}


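/**
 * Wrap an existing winsys surface handle in a new svga texture, for
 * imported/shared surfaces.  The surface format must be compatible with
 * the format implied by the resource template.
 */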
struct pipe_resource *
svga_texture_from_handle(struct pipe_screen *screen,
                         const struct pipe_resource *template,
                         struct winsys_handle *whandle)
{
   struct svga_winsys_screen *sws = svga_winsys_screen(screen);
   struct svga_screen *ss = svga_screen(screen);
   struct svga_winsys_surface *srf;
   struct svga_texture *tex;
   enum SVGA3dSurfaceFormat format = 0;
   assert(screen);

   /* Only 2D/RECT textures with a single mipmap level and depth 1 are
    * supported.
    */
   if ((template->target != PIPE_TEXTURE_2D &&
        template->target != PIPE_TEXTURE_RECT) ||
       template->last_level != 0 ||
       template->depth0 != 1) {
      return NULL;
   }

   srf = sws->surface_from_handle(sws, whandle, &format);

   if (!srf)
      return NULL;

   if (svga_translate_format(svga_screen(screen), template->format,
                             template->bind) != format) {
      unsigned f1 = svga_translate_format(svga_screen(screen),
                                          template->format, template->bind);
      unsigned f2 = format;

      /* It's okay for XRGB and ARGB or depth with/without stencil to get
       * mixed up.
       */
      if (f1 == SVGA3D_B8G8R8A8_UNORM)
         f1 = SVGA3D_A8R8G8B8;
      if (f1 == SVGA3D_B8G8R8X8_UNORM)
         f1 = SVGA3D_X8R8G8B8;

      if (!((f1 == f2) ||
            (f1 == SVGA3D_X8R8G8B8 && f2 == SVGA3D_A8R8G8B8) ||
            (f1 == SVGA3D_X8R8G8B8 && f2 == SVGA3D_B8G8R8X8_UNORM) ||
            (f1 == SVGA3D_A8R8G8B8 && f2 == SVGA3D_X8R8G8B8) ||
            (f1 == SVGA3D_A8R8G8B8 && f2 == SVGA3D_B8G8R8A8_UNORM) ||
            (f1 == SVGA3D_Z_D24X8 && f2 == SVGA3D_Z_D24S8) ||
            (f1 == SVGA3D_Z_DF24 && f2 == SVGA3D_Z_D24S8_INT))) {
         debug_printf("%s wrong format %s != %s\n", __FUNCTION__,
                      svga_format_name(f1), svga_format_name(f2));
         return NULL;
      }
   }

   tex = CALLOC_STRUCT(svga_texture);
   if (!tex)
      return NULL;

   tex->defined = CALLOC(template->depth0 * template->array_size,
                         sizeof(tex->defined[0]));
   if (!tex->defined) {
      FREE(tex);
      return NULL;
   }

   tex->b.b = *template;
   tex->b.vtbl = &svga_texture_vtbl;
   pipe_reference_init(&tex->b.b.reference, 1);
   tex->b.b.screen = screen;

   SVGA_DBG(DEBUG_DMA, "wrap surface sid %p\n", srf);

   tex->key.cachable = 0;
   tex->key.format = format;
   tex->handle = srf;

   tex->rendered_to = CALLOC(1, sizeof(tex->rendered_to[0]));
   tex->imported = TRUE;

   ss->hud.num_resources++;

   return &tex->b.b;
}

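/**
 * Generate mipmap levels for a texture using the VGPU10 GenMips command.
 * Returns FALSE when the hardware path cannot be used (non-2D target,
 * format without hw mipmap support, or surface not bound as a render
 * target) so that the caller can fall back to the software utility.
 */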
boolean
svga_texture_generate_mipmap(struct pipe_context *pipe,
                             struct pipe_resource *pt,
                             enum pipe_format format,
                             unsigned base_level,
                             unsigned last_level,
                             unsigned first_layer,
                             unsigned last_layer)
{
   struct pipe_sampler_view templ, *psv;
   struct svga_pipe_sampler_view *sv;
   struct svga_context *svga = svga_context(pipe);
   struct svga_texture *tex = svga_texture(pt);
   enum pipe_error ret;

   assert(svga_have_vgpu10(svga));

   /* Only 2D textures are supported for now */
   if (pt->target != PIPE_TEXTURE_2D)
      return FALSE;

   /* Fall back to the mipmap generation utility for formats that do not
    * support hw generate mipmap.
    */
   if (!svga_format_support_gen_mips(format))
      return FALSE;

   /* Make sure the texture surface was created with
    * SVGA3D_SURFACE_BIND_RENDER_TARGET
    */
   if (!tex->handle || !(tex->key.flags & SVGA3D_SURFACE_BIND_RENDER_TARGET))
      return FALSE;

   templ.format = format;
   templ.u.tex.first_layer = first_layer;
   templ.u.tex.last_layer = last_layer;
   templ.u.tex.first_level = base_level;
   templ.u.tex.last_level = last_level;

   psv = pipe->create_sampler_view(pipe, pt, &templ);
   if (psv == NULL)
      return FALSE;

   sv = svga_pipe_sampler_view(psv);
   ret = svga_validate_pipe_sampler_view(svga, sv);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = svga_validate_pipe_sampler_view(svga, sv);
      assert(ret == PIPE_OK);
   }

   ret = SVGA3D_vgpu10_GenMips(svga->swc, sv->id, tex->handle);
   if (ret != PIPE_OK) {
      svga_context_flush(svga, NULL);
      ret = SVGA3D_vgpu10_GenMips(svga->swc, sv->id, tex->handle);
   }
   pipe_sampler_view_reference(&psv, NULL);

   svga->hud.num_generate_mipmap++;

   return TRUE;
}