svga: minor code simplification in svga_texture_transfer_unmap()
[mesa.git] / src / gallium / drivers / svga / svga_resource_texture.c
1 /**********************************************************
2 * Copyright 2008-2009 VMware, Inc. All rights reserved.
3 *
4 * Permission is hereby granted, free of charge, to any person
5 * obtaining a copy of this software and associated documentation
6 * files (the "Software"), to deal in the Software without
7 * restriction, including without limitation the rights to use, copy,
8 * modify, merge, publish, distribute, sublicense, and/or sell copies
9 * of the Software, and to permit persons to whom the Software is
10 * furnished to do so, subject to the following conditions:
11 *
12 * The above copyright notice and this permission notice shall be
13 * included in all copies or substantial portions of the Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
16 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
17 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
18 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
19 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
20 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
21 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 * SOFTWARE.
23 *
24 **********************************************************/
25
26 #include "svga3d_reg.h"
27 #include "svga3d_surfacedefs.h"
28
29 #include "pipe/p_state.h"
30 #include "pipe/p_defines.h"
31 #include "os/os_thread.h"
32 #include "util/u_format.h"
33 #include "util/u_inlines.h"
34 #include "util/u_math.h"
35 #include "util/u_memory.h"
36 #include "util/u_resource.h"
37
38 #include "svga_cmd.h"
39 #include "svga_format.h"
40 #include "svga_screen.h"
41 #include "svga_context.h"
42 #include "svga_resource_texture.h"
43 #include "svga_resource_buffer.h"
44 #include "svga_sampler_view.h"
45 #include "svga_winsys.h"
46 #include "svga_debug.h"
47
48
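/**
 * Helper for svga_transfer_dma(): emit a single SurfaceDMA command for one
 * copy box between the transfer's hardware buffer and the host surface.
 * If the command buffer is full, flush and retry once.
 */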
49 static void
50 svga_transfer_dma_band(struct svga_context *svga,
51 struct svga_transfer *st,
52 SVGA3dTransferType transfer,
53 unsigned x, unsigned y, unsigned z,
54 unsigned w, unsigned h, unsigned d,
55 unsigned srcx, unsigned srcy, unsigned srcz,
56 SVGA3dSurfaceDMAFlags flags)
57 {
58 struct svga_texture *texture = svga_texture(st->base.resource);
59 SVGA3dCopyBox box;
60 enum pipe_error ret;
61
62 assert(!st->use_direct_map);
63
64 box.x = x;
65 box.y = y;
66 box.z = z;
67 box.w = w;
68 box.h = h;
69 box.d = d;
70 box.srcx = srcx;
71 box.srcy = srcy;
72 box.srcz = srcz;
73
74 SVGA_DBG(DEBUG_DMA, "dma %s sid %p, face %u, (%u, %u, %u) - "
75 "(%u, %u, %u), %ubpp\n",
76 transfer == SVGA3D_WRITE_HOST_VRAM ? "to" : "from",
77 texture->handle,
78 st->slice,
79 x,
80 y,
81 z,
82 x + w,
83 y + h,
84 z + 1,
85 util_format_get_blocksize(texture->b.b.format) * 8 /
86 (util_format_get_blockwidth(texture->b.b.format)
87 * util_format_get_blockheight(texture->b.b.format)));
88
89 ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
90 if (ret != PIPE_OK) {
91 svga_context_flush(svga, NULL);
92 ret = SVGA3D_SurfaceDMA(svga->swc, st, transfer, &box, 1, flags);
93 assert(ret == PIPE_OK);
94 }
95 }
96
97
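/**
 * Perform the DMA transfer (to or from the host) for a texture transfer.
 * If the whole image fits in the hardware buffer, a single DMA is issued;
 * otherwise the data is staged through the malloc'd swbuf and transferred
 * band by band.  Readbacks are synchronized with a fence before returning.
 */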
98 static void
99 svga_transfer_dma(struct svga_context *svga,
100 struct svga_transfer *st,
101 SVGA3dTransferType transfer,
102 SVGA3dSurfaceDMAFlags flags)
103 {
104 struct svga_texture *texture = svga_texture(st->base.resource);
105 struct svga_screen *screen = svga_screen(texture->b.b.screen);
106 struct svga_winsys_screen *sws = screen->sws;
107 struct pipe_fence_handle *fence = NULL;
108
109 assert(!st->use_direct_map);
110
111 if (transfer == SVGA3D_READ_HOST_VRAM) {
112 SVGA_DBG(DEBUG_PERF, "%s: readback transfer\n", __FUNCTION__);
113 }
114
115 /* Ensure any pending operations on host surfaces are queued on the command
116 * buffer first.
117 */
118 svga_surfaces_flush( svga );
119
120 if (!st->swbuf) {
121 /* Do the DMA transfer in a single go */
122 svga_transfer_dma_band(svga, st, transfer,
123 st->base.box.x, st->base.box.y, st->base.box.z,
124 st->base.box.width, st->base.box.height, st->base.box.depth,
125 0, 0, 0,
126 flags);
127
128 if (transfer == SVGA3D_READ_HOST_VRAM) {
129 svga_context_flush(svga, &fence);
130 sws->fence_finish(sws, fence, 0);
131 sws->fence_reference(sws, &fence, NULL);
132 }
133 }
134 else {
135 int y, h, srcy;
136 unsigned blockheight =
137 util_format_get_blockheight(st->base.resource->format);
138
139 h = st->hw_nblocksy * blockheight;
140 srcy = 0;
141
142 for (y = 0; y < st->base.box.height; y += h) {
143 unsigned offset, length;
144 void *hw, *sw;
145
146 if (y + h > st->base.box.height)
147 h = st->base.box.height - y;
148
149 /* Transfer band must be aligned to pixel block boundaries */
150 assert(y % blockheight == 0);
151 assert(h % blockheight == 0);
152
153 offset = y * st->base.stride / blockheight;
154 length = h * st->base.stride / blockheight;
155
156 sw = (uint8_t *) st->swbuf + offset;
157
158 if (transfer == SVGA3D_WRITE_HOST_VRAM) {
159 unsigned usage = PIPE_TRANSFER_WRITE;
160
161 /* Wait for the previous DMAs to complete */
162 /* TODO: keep one DMA (at half the size) in the background */
163 if (y) {
164 svga_context_flush(svga, NULL);
165 usage |= PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE;
166 }
167
168 hw = sws->buffer_map(sws, st->hwbuf, usage);
169 assert(hw);
170 if (hw) {
171 memcpy(hw, sw, length);
172 sws->buffer_unmap(sws, st->hwbuf);
173 }
174 }
175
176 svga_transfer_dma_band(svga, st, transfer,
177 st->base.box.x, y, st->base.box.z,
178 st->base.box.width, h, st->base.box.depth,
179 0, srcy, 0, flags);
180
181             /*
182              * Prevent the texture contents from being discarded on the next
183              * band upload.
184              */
185 flags.discard = FALSE;
186
187 if (transfer == SVGA3D_READ_HOST_VRAM) {
188 svga_context_flush(svga, &fence);
189 sws->fence_finish(sws, fence, 0);
190
191 hw = sws->buffer_map(sws, st->hwbuf, PIPE_TRANSFER_READ);
192 assert(hw);
193 if (hw) {
194 memcpy(sw, hw, length);
195 sws->buffer_unmap(sws, st->hwbuf);
196 }
197 }
198 }
199 }
200 }
201
202
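/**
 * Return a winsys handle for the texture's underlying surface so that it
 * can be shared.  Shared surfaces must not be cached.
 */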
203 static boolean
204 svga_texture_get_handle(struct pipe_screen *screen,
205 struct pipe_resource *texture,
206 struct winsys_handle *whandle)
207 {
208 struct svga_winsys_screen *sws = svga_winsys_screen(texture->screen);
209 unsigned stride;
210
211 assert(svga_texture(texture)->key.cachable == 0);
212 svga_texture(texture)->key.cachable = 0;
213
214 stride = util_format_get_nblocksx(texture->format, texture->width0) *
215 util_format_get_blocksize(texture->format);
216
217 return sws->surface_get_handle(sws, svga_texture(texture)->handle,
218 stride, whandle);
219 }
220
221
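/**
 * Destroy a texture: release the cached sampler view, return the surface
 * to the screen's surface cache (or destroy it), and free per-slice state.
 */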
222 static void
223 svga_texture_destroy(struct pipe_screen *screen,
224 struct pipe_resource *pt)
225 {
226 struct svga_screen *ss = svga_screen(screen);
227 struct svga_texture *tex = svga_texture(pt);
228
229 ss->texture_timestamp++;
230
231 svga_sampler_view_reference(&tex->cached_view, NULL);
232
233 /*
234 DBG("%s deleting %p\n", __FUNCTION__, (void *) tex);
235 */
236 SVGA_DBG(DEBUG_DMA, "unref sid %p (texture)\n", tex->handle);
237 svga_screen_surface_destroy(ss, &tex->key, &tex->handle);
238
239 ss->hud.total_resource_bytes -= tex->size;
240
241 FREE(tex->defined);
242 FREE(tex->rendered_to);
243 FREE(tex->dirty);
244 FREE(tex);
245
246 assert(ss->hud.num_resources > 0);
247 if (ss->hud.num_resources > 0)
248 ss->hud.num_resources--;
249 }
250
251
252 /**
253 * Determine if we need to read back a texture image before mapping it.
254 */
255 static boolean
256 need_tex_readback(struct pipe_transfer *transfer)
257 {
258 struct svga_texture *t = svga_texture(transfer->resource);
259
260 if (transfer->usage & PIPE_TRANSFER_READ)
261 return TRUE;
262
263 if ((transfer->usage & PIPE_TRANSFER_WRITE) &&
264 ((transfer->usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) == 0)) {
265 unsigned face;
266
267 if (transfer->resource->target == PIPE_TEXTURE_CUBE) {
268 assert(transfer->box.depth == 1);
269 face = transfer->box.z;
270 }
271 else {
272 face = 0;
273 }
274 if (svga_was_texture_rendered_to(t, face, transfer->level)) {
275 return TRUE;
276 }
277 }
278
279 return FALSE;
280 }
281
282
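/**
 * Issue a readback of one image (slice/mipmap level) of a guest-backed
 * surface using the VGPU9 command, flushing and retrying once if needed.
 */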
283 static enum pipe_error
284 readback_image_vgpu9(struct svga_context *svga,
285 struct svga_winsys_surface *surf,
286 unsigned slice,
287 unsigned level)
288 {
289 enum pipe_error ret;
290
291 ret = SVGA3D_ReadbackGBImage(svga->swc, surf, slice, level);
292 if (ret != PIPE_OK) {
293 svga_context_flush(svga, NULL);
294 ret = SVGA3D_ReadbackGBImage(svga->swc, surf, slice, level);
295 }
296 return ret;
297 }
298
299
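/**
 * Issue a readback of one subresource of a guest-backed surface using the
 * VGPU10 command.  The subresource index is slice * numMipLevels + level.
 */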
300 static enum pipe_error
301 readback_image_vgpu10(struct svga_context *svga,
302 struct svga_winsys_surface *surf,
303 unsigned slice,
304 unsigned level,
305 unsigned numMipLevels)
306 {
307 enum pipe_error ret;
308 unsigned subResource;
309
310 subResource = slice * numMipLevels + level;
311 ret = SVGA3D_vgpu10_ReadbackSubResource(svga->swc, surf, subResource);
312 if (ret != PIPE_OK) {
313 svga_context_flush(svga, NULL);
314 ret = SVGA3D_vgpu10_ReadbackSubResource(svga->swc, surf, subResource);
315 }
316 return ret;
317 }
318
319
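/**
 * Map a region of a texture for CPU access.  Depending on the device
 * capabilities this either maps the guest-backed surface directly or
 * stages the data through a DMA (hardware or malloc'd) buffer, reading the
 * image back first when the transfer needs the current contents.
 */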
320 static void *
321 svga_texture_transfer_map(struct pipe_context *pipe,
322 struct pipe_resource *texture,
323 unsigned level,
324 unsigned usage,
325 const struct pipe_box *box,
326 struct pipe_transfer **ptransfer)
327 {
328 struct svga_context *svga = svga_context(pipe);
329 struct svga_screen *ss = svga_screen(pipe->screen);
330 struct svga_winsys_screen *sws = ss->sws;
331 struct svga_texture *tex = svga_texture(texture);
332 struct svga_transfer *st;
333 unsigned nblocksx, nblocksy;
334 boolean use_direct_map = svga_have_gb_objects(svga) &&
335 !svga_have_gb_dma(svga);
336 unsigned d;
337 void *returnVal = NULL;
338 int64_t begin = svga_get_time(svga);
339
340 SVGA_STATS_TIME_PUSH(sws, SVGA_STATS_TIME_TEXTRANSFERMAP);
341
342 /* We can't map texture storage directly unless we have GB objects */
343 if (usage & PIPE_TRANSFER_MAP_DIRECTLY) {
344 if (svga_have_gb_objects(svga))
345 use_direct_map = TRUE;
346 else
347 goto done;
348 }
349
350 st = CALLOC_STRUCT(svga_transfer);
351 if (!st)
352 goto done;
353
354 st->base.level = level;
355 st->base.usage = usage;
356 st->base.box = *box;
357
358 switch (tex->b.b.target) {
359 case PIPE_TEXTURE_CUBE:
360 st->slice = st->base.box.z;
361 st->base.box.z = 0; /* so we don't apply double offsets below */
362 break;
363 case PIPE_TEXTURE_2D_ARRAY:
364 case PIPE_TEXTURE_1D_ARRAY:
365 st->slice = st->base.box.z;
366 st->base.box.z = 0; /* so we don't apply double offsets below */
367
368      /* Force direct map for transferring multiple slices */
369 if (st->base.box.depth > 1)
370 use_direct_map = svga_have_gb_objects(svga);
371
372 break;
373 default:
374 st->slice = 0;
375 break;
376 }
377
378 {
379 unsigned w, h;
380 if (use_direct_map) {
381 /* we'll directly access the guest-backed surface */
382 w = u_minify(texture->width0, level);
383 h = u_minify(texture->height0, level);
384 d = u_minify(texture->depth0, level);
385 }
386 else {
387 /* we'll put the data into a tightly packed buffer */
388 w = box->width;
389 h = box->height;
390 d = box->depth;
391 }
392 nblocksx = util_format_get_nblocksx(texture->format, w);
393 nblocksy = util_format_get_nblocksy(texture->format, h);
394 }
395
396 pipe_resource_reference(&st->base.resource, texture);
397
398 st->base.stride = nblocksx*util_format_get_blocksize(texture->format);
399 st->base.layer_stride = st->base.stride * nblocksy;
400 st->use_direct_map = use_direct_map;
401
402 *ptransfer = &st->base;
403
404
405 if (usage & PIPE_TRANSFER_WRITE) {
406 /* record texture upload for HUD */
407 svga->hud.num_bytes_uploaded +=
408 nblocksx * nblocksy * d * util_format_get_blocksize(texture->format);
409 }
410
411 if (!use_direct_map) {
412 /* Use a DMA buffer */
413 st->hw_nblocksy = nblocksy;
414
415 st->hwbuf = svga_winsys_buffer_create(svga, 1, 0,
416 st->hw_nblocksy * st->base.stride * d);
417       while (!st->hwbuf && (st->hw_nblocksy /= 2)) {
418 st->hwbuf = svga_winsys_buffer_create(svga, 1, 0,
419 st->hw_nblocksy * st->base.stride * d);
420 }
421
422 if (!st->hwbuf) {
423 FREE(st);
424 goto done;
425 }
426
427 if (st->hw_nblocksy < nblocksy) {
428 /* We couldn't allocate a hardware buffer big enough for the transfer,
429 * so allocate regular malloc memory instead */
430 if (0) {
431 debug_printf("%s: failed to allocate %u KB of DMA, "
432 "splitting into %u x %u KB DMA transfers\n",
433 __FUNCTION__,
434 (nblocksy * st->base.stride + 1023) / 1024,
435 (nblocksy + st->hw_nblocksy - 1) / st->hw_nblocksy,
436 (st->hw_nblocksy * st->base.stride + 1023) / 1024);
437 }
438
439 st->swbuf = MALLOC(nblocksy * st->base.stride * d);
440 if (!st->swbuf) {
441 sws->buffer_destroy(sws, st->hwbuf);
442 FREE(st);
443 goto done;
444 }
445 }
446
447 if (usage & PIPE_TRANSFER_READ) {
448 SVGA3dSurfaceDMAFlags flags;
449 memset(&flags, 0, sizeof flags);
450 svga_transfer_dma(svga, st, SVGA3D_READ_HOST_VRAM, flags);
451 }
452 } else {
453 struct pipe_transfer *transfer = &st->base;
454 struct svga_winsys_surface *surf = tex->handle;
455
456 if (!surf) {
457 FREE(st);
458 goto done;
459 }
460
461       /* If this is the first time the surface is mapped in this
462        * command buffer, clear the surface's dirty masks.
463        */
464 if (sws->surface_is_flushed(sws, surf)) {
465 svga_clear_texture_dirty(tex);
466 }
467
468 if (need_tex_readback(transfer)) {
469 enum pipe_error ret;
470
471 svga_surfaces_flush(svga);
472
473 if (svga_have_vgpu10(svga)) {
474 ret = readback_image_vgpu10(svga, surf, st->slice, level,
475 tex->b.b.last_level + 1);
476 } else {
477 ret = readback_image_vgpu9(svga, surf, st->slice, level);
478 }
479
480 svga->hud.num_readbacks++;
481 SVGA_STATS_COUNT_INC(sws, SVGA_STATS_COUNT_TEXREADBACK);
482
483 assert(ret == PIPE_OK);
484 (void) ret;
485
486 svga_context_flush(svga, NULL);
487
488 /*
489 * Note: if PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE were specified
490 * we could potentially clear the flag for all faces/layers/mips.
491 */
492 svga_clear_texture_rendered_to(tex, st->slice, level);
493 }
494 else {
495 assert(usage & PIPE_TRANSFER_WRITE);
496 if ((usage & PIPE_TRANSFER_UNSYNCHRONIZED) == 0) {
497 if (svga_is_texture_dirty(tex, st->slice, level)) {
498                /*
499                 * Do a surface flush if the subresource has been modified
500                 * in this command buffer.
501                 */
502 svga_surfaces_flush(svga);
503 if (!sws->surface_is_flushed(sws, surf)) {
504 svga->hud.surface_write_flushes++;
505 SVGA_STATS_COUNT_INC(sws, SVGA_STATS_COUNT_SURFACEWRITEFLUSH);
506 svga_context_flush(svga, NULL);
507 }
508 }
509 }
510 }
511 if (usage & PIPE_TRANSFER_WRITE) {
512 /* mark this texture level as dirty */
513 svga_set_texture_dirty(tex, st->slice, level);
514 }
515 }
516
517 /*
518 * Begin mapping code
519 */
520 if (st->swbuf) {
521 returnVal = st->swbuf;
522 }
523 else if (!use_direct_map) {
524 returnVal = sws->buffer_map(sws, st->hwbuf, usage);
525 }
526 else {
527 SVGA3dSize baseLevelSize;
528 struct svga_winsys_surface *surf = tex->handle;
529 uint8_t *map;
530 boolean retry;
531 unsigned offset, mip_width, mip_height;
532 unsigned xoffset = st->base.box.x;
533 unsigned yoffset = st->base.box.y;
534 unsigned zoffset = st->base.box.z;
535
536 map = svga->swc->surface_map(svga->swc, surf, usage, &retry);
537 if (map == NULL && retry) {
538          /*
539           * At this point, svga_surfaces_flush() should already have been
540           * called earlier in svga_texture_transfer_map().
541           */
542 svga_context_flush(svga, NULL);
543 map = svga->swc->surface_map(svga->swc, surf, usage, &retry);
544 }
545
546 /*
547 * Make sure we return NULL if the map fails
548 */
549 if (!map) {
550 FREE(st);
551 returnVal = map;
552 goto done;
553 }
554
555 /**
556 * Compute the offset to the specific texture slice in the buffer.
557 */
558 baseLevelSize.width = tex->b.b.width0;
559 baseLevelSize.height = tex->b.b.height0;
560 baseLevelSize.depth = tex->b.b.depth0;
561
562 if ((tex->b.b.target == PIPE_TEXTURE_1D_ARRAY) ||
563 (tex->b.b.target == PIPE_TEXTURE_2D_ARRAY)) {
564 st->base.layer_stride =
565 svga3dsurface_get_image_offset(tex->key.format, baseLevelSize,
566 tex->b.b.last_level + 1, 1, 0);
567 }
568
569 offset = svga3dsurface_get_image_offset(tex->key.format, baseLevelSize,
570 tex->b.b.last_level + 1, /* numMips */
571 st->slice, level);
572 if (level > 0) {
573 assert(offset > 0);
574 }
575
576 mip_width = u_minify(tex->b.b.width0, level);
577 mip_height = u_minify(tex->b.b.height0, level);
578
579 offset += svga3dsurface_get_pixel_offset(tex->key.format,
580 mip_width, mip_height,
581 xoffset, yoffset, zoffset);
582 returnVal = (void *) (map + offset);
583 }
584
585 svga->hud.map_buffer_time += (svga_get_time(svga) - begin);
586 svga->hud.num_resources_mapped++;
587
588 done:
589 SVGA_STATS_TIME_POP(sws);
590 return returnVal;
591 }
592
593
594 /**
595 * Unmap a GB texture surface.
596 */
597 static void
598 svga_texture_surface_unmap(struct svga_context *svga,
599 struct pipe_transfer *transfer)
600 {
601 struct svga_winsys_surface *surf = svga_texture(transfer->resource)->handle;
602 struct svga_winsys_context *swc = svga->swc;
603 boolean rebind;
604
605 assert(surf);
606
607 swc->surface_unmap(swc, surf, &rebind);
608 if (rebind) {
609 enum pipe_error ret;
610 ret = SVGA3D_BindGBSurface(swc, surf);
611 if (ret != PIPE_OK) {
612 /* flush and retry */
613 svga_context_flush(svga, NULL);
614 ret = SVGA3D_BindGBSurface(swc, surf);
615 assert(ret == PIPE_OK);
616 }
617 }
618 }
619
620
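/**
 * Issue an update of one image (slice/mipmap level) of a guest-backed
 * surface using the VGPU9 command, flushing and retrying once if needed.
 */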
621 static enum pipe_error
622 update_image_vgpu9(struct svga_context *svga,
623 struct svga_winsys_surface *surf,
624 const SVGA3dBox *box,
625 unsigned slice,
626 unsigned level)
627 {
628 enum pipe_error ret;
629
630 ret = SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level);
631 if (ret != PIPE_OK) {
632 svga_context_flush(svga, NULL);
633 ret = SVGA3D_UpdateGBImage(svga->swc, surf, box, slice, level);
634 }
635 return ret;
636 }
637
638
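/**
 * Issue an update of one subresource of a guest-backed surface using the
 * VGPU10 command.  The subresource index is slice * numMipLevels + level.
 */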
639 static enum pipe_error
640 update_image_vgpu10(struct svga_context *svga,
641 struct svga_winsys_surface *surf,
642 const SVGA3dBox *box,
643 unsigned slice,
644 unsigned level,
645 unsigned numMipLevels)
646 {
647 enum pipe_error ret;
648 unsigned subResource;
649
650 subResource = slice * numMipLevels + level;
651 ret = SVGA3D_vgpu10_UpdateSubResource(svga->swc, surf, box, subResource);
652 if (ret != PIPE_OK) {
653 svga_context_flush(svga, NULL);
654 ret = SVGA3D_vgpu10_UpdateSubResource(svga->swc, surf, box, subResource);
655 }
656 return ret;
657 }
658
659
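/**
 * Unmap a texture transfer and propagate any written data to the host,
 * either with a DMA upload (non-GB path) or with update-image commands on
 * the mapped guest-backed surface.
 */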
660 static void
661 svga_texture_transfer_unmap(struct pipe_context *pipe,
662 struct pipe_transfer *transfer)
663 {
664 struct svga_context *svga = svga_context(pipe);
665 struct svga_screen *ss = svga_screen(pipe->screen);
666 struct svga_winsys_screen *sws = ss->sws;
667 struct svga_transfer *st = svga_transfer(transfer);
668 struct svga_texture *tex = svga_texture(transfer->resource);
669
670 SVGA_STATS_TIME_PUSH(sws, SVGA_STATS_TIME_TEXTRANSFERUNMAP);
671
672 if (!st->swbuf) {
673 if (st->use_direct_map) {
674 svga_texture_surface_unmap(svga, transfer);
675 }
676 else {
677 sws->buffer_unmap(sws, st->hwbuf);
678 }
679 }
680
681 if (!st->use_direct_map && (st->base.usage & PIPE_TRANSFER_WRITE)) {
682 /* Use DMA to transfer texture data */
683 SVGA3dSurfaceDMAFlags flags;
684
685 memset(&flags, 0, sizeof flags);
686 if (transfer->usage & PIPE_TRANSFER_DISCARD_WHOLE_RESOURCE) {
687 flags.discard = TRUE;
688 }
689 if (transfer->usage & PIPE_TRANSFER_UNSYNCHRONIZED) {
690 flags.unsynchronized = TRUE;
691 }
692
693 svga_transfer_dma(svga, st, SVGA3D_WRITE_HOST_VRAM, flags);
694 } else if (transfer->usage & PIPE_TRANSFER_WRITE) {
695 struct svga_winsys_surface *surf = tex->handle;
696 SVGA3dBox box;
697 enum pipe_error ret;
698 unsigned nlayers = 1;
699
700 assert(svga_have_gb_objects(svga));
701
702       /* update the affected region */
703 box.x = transfer->box.x;
704 box.y = transfer->box.y;
705 box.w = transfer->box.width;
706 box.h = transfer->box.height;
707 box.d = transfer->box.depth;
708
709 switch (tex->b.b.target) {
710 case PIPE_TEXTURE_CUBE:
711 box.z = 0;
712 break;
713 case PIPE_TEXTURE_2D_ARRAY:
714 nlayers = box.d;
715 box.z = 0;
716 box.d = 1;
717 break;
718 case PIPE_TEXTURE_1D_ARRAY:
719 nlayers = box.d;
720 box.y = box.z = 0;
721 box.d = 1;
722 break;
723 default:
724 box.z = transfer->box.z;
725 break;
726 }
727
728 if (0)
729 debug_printf("%s %d, %d, %d %d x %d x %d\n",
730 __FUNCTION__,
731 box.x, box.y, box.z,
732 box.w, box.h, box.d);
733
734 if (svga_have_vgpu10(svga)) {
735 unsigned i;
736 for (i = 0; i < nlayers; i++) {
737 ret = update_image_vgpu10(svga, surf, &box,
738 st->slice + i, transfer->level,
739 tex->b.b.last_level + 1);
740 assert(ret == PIPE_OK);
741 }
742 } else {
743 assert(nlayers == 1);
744 ret = update_image_vgpu9(svga, surf, &box, st->slice, transfer->level);
745 assert(ret == PIPE_OK);
746 }
747
748 svga->hud.num_resource_updates++;
749
750 (void) ret;
751 }
752
753 ss->texture_timestamp++;
754 svga_age_texture_view(tex, transfer->level);
755 if (transfer->resource->target == PIPE_TEXTURE_CUBE)
756 svga_define_texture_level(tex, st->slice, transfer->level);
757 else
758 svga_define_texture_level(tex, 0, transfer->level);
759
760 pipe_resource_reference(&st->base.resource, NULL);
761
762 FREE(st->swbuf);
763 if (!st->use_direct_map) {
764 sws->buffer_destroy(sws, st->hwbuf);
765 }
766 FREE(st);
767 SVGA_STATS_TIME_POP(sws);
768 }
769
770
771 /**
772 * Does format store depth values?
773 */
774 static inline boolean
775 format_has_depth(enum pipe_format format)
776 {
777 const struct util_format_description *desc = util_format_description(format);
778 return util_format_has_depth(desc);
779 }
780
781
782 struct u_resource_vtbl svga_texture_vtbl =
783 {
784 svga_texture_get_handle, /* get_handle */
785 svga_texture_destroy, /* resource_destroy */
786 svga_texture_transfer_map, /* transfer_map */
787 u_default_transfer_flush_region, /* transfer_flush_region */
788 svga_texture_transfer_unmap, /* transfer_unmap */
789 };
790
791
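/**
 * Create a new texture resource: translate the gallium template into a
 * surface key (size, format, flags, bind hints) and allocate the backing
 * SVGA surface through the screen's surface cache.
 */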
792 struct pipe_resource *
793 svga_texture_create(struct pipe_screen *screen,
794 const struct pipe_resource *template)
795 {
796 struct svga_screen *svgascreen = svga_screen(screen);
797 struct svga_texture *tex;
798 unsigned bindings = template->bind;
799
800 SVGA_STATS_TIME_PUSH(svgascreen->sws,
801 SVGA_STATS_TIME_CREATETEXTURE);
802
803 assert(template->last_level < SVGA_MAX_TEXTURE_LEVELS);
804 if (template->last_level >= SVGA_MAX_TEXTURE_LEVELS) {
805 goto fail_notex;
806 }
807
808 tex = CALLOC_STRUCT(svga_texture);
809 if (!tex) {
810 goto fail_notex;
811 }
812
813 tex->defined = CALLOC(template->depth0 * template->array_size,
814 sizeof(tex->defined[0]));
815 if (!tex->defined) {
816 FREE(tex);
817 goto fail_notex;
818 }
819
820 tex->rendered_to = CALLOC(template->depth0 * template->array_size,
821 sizeof(tex->rendered_to[0]));
822 if (!tex->rendered_to) {
823 goto fail;
824 }
825
826 tex->dirty = CALLOC(template->depth0 * template->array_size,
827 sizeof(tex->dirty[0]));
828 if (!tex->dirty) {
829 goto fail;
830 }
831
832 tex->b.b = *template;
833 tex->b.vtbl = &svga_texture_vtbl;
834 pipe_reference_init(&tex->b.b.reference, 1);
835 tex->b.b.screen = screen;
836
837 tex->key.flags = 0;
838 tex->key.size.width = template->width0;
839 tex->key.size.height = template->height0;
840 tex->key.size.depth = template->depth0;
841 tex->key.arraySize = 1;
842 tex->key.numFaces = 1;
843 tex->key.sampleCount = template->nr_samples;
844
845 if (template->nr_samples > 1) {
846 tex->key.flags |= SVGA3D_SURFACE_MASKABLE_ANTIALIAS;
847 }
848
849 if (svgascreen->sws->have_vgpu10) {
850 switch (template->target) {
851 case PIPE_TEXTURE_1D:
852 tex->key.flags |= SVGA3D_SURFACE_1D;
853 break;
854 case PIPE_TEXTURE_1D_ARRAY:
855 tex->key.flags |= SVGA3D_SURFACE_1D;
856 /* fall-through */
857 case PIPE_TEXTURE_2D_ARRAY:
858 tex->key.flags |= SVGA3D_SURFACE_ARRAY;
859 tex->key.arraySize = template->array_size;
860 break;
861 case PIPE_TEXTURE_3D:
862 tex->key.flags |= SVGA3D_SURFACE_VOLUME;
863 break;
864 case PIPE_TEXTURE_CUBE:
865 tex->key.flags |= (SVGA3D_SURFACE_CUBEMAP | SVGA3D_SURFACE_ARRAY);
866 tex->key.numFaces = 6;
867 break;
868 default:
869 break;
870 }
871 }
872 else {
873 switch (template->target) {
874 case PIPE_TEXTURE_3D:
875 tex->key.flags |= SVGA3D_SURFACE_VOLUME;
876 break;
877 case PIPE_TEXTURE_CUBE:
878 tex->key.flags |= SVGA3D_SURFACE_CUBEMAP;
879 tex->key.numFaces = 6;
880 break;
881 default:
882 break;
883 }
884 }
885
886 tex->key.cachable = 1;
887
888 if ((bindings & (PIPE_BIND_RENDER_TARGET | PIPE_BIND_DEPTH_STENCIL)) &&
889 !(bindings & PIPE_BIND_SAMPLER_VIEW)) {
890 /* Also check if the format can be sampled from */
891 if (screen->is_format_supported(screen, template->format,
892 template->target,
893 template->nr_samples,
894 PIPE_BIND_SAMPLER_VIEW)) {
895 bindings |= PIPE_BIND_SAMPLER_VIEW;
896 }
897 }
898
899 if (bindings & PIPE_BIND_SAMPLER_VIEW) {
900 tex->key.flags |= SVGA3D_SURFACE_HINT_TEXTURE;
901 tex->key.flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE;
902
903 if (!(bindings & PIPE_BIND_RENDER_TARGET)) {
904 /* Also check if the format is renderable */
905 if (screen->is_format_supported(screen, template->format,
906 template->target,
907 template->nr_samples,
908 PIPE_BIND_RENDER_TARGET)) {
909 bindings |= PIPE_BIND_RENDER_TARGET;
910 }
911 }
912 }
913
914 if (bindings & PIPE_BIND_DISPLAY_TARGET) {
915 tex->key.cachable = 0;
916 }
917
918 if (bindings & PIPE_BIND_SHARED) {
919 tex->key.cachable = 0;
920 }
921
922 if (bindings & (PIPE_BIND_SCANOUT | PIPE_BIND_CURSOR)) {
923 tex->key.scanout = 1;
924 tex->key.cachable = 0;
925 }
926
927 /*
928 * Note: Previously we never passed the
929 * SVGA3D_SURFACE_HINT_RENDERTARGET hint. Mesa cannot
930 * know beforehand whether a texture will be used as a rendertarget or not
931 * and it always requests PIPE_BIND_RENDER_TARGET, therefore
932 * passing the SVGA3D_SURFACE_HINT_RENDERTARGET here defeats its purpose.
933 *
934     * However, this was changed because other state trackers (XA, for
935     * example) use the hint accurately, and certain device versions rely
936     * on it in certain situations to render correctly.
937 */
938 if ((bindings & PIPE_BIND_RENDER_TARGET) &&
939 !util_format_is_s3tc(template->format)) {
940 tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
941 tex->key.flags |= SVGA3D_SURFACE_BIND_RENDER_TARGET;
942 }
943
944 if (bindings & PIPE_BIND_DEPTH_STENCIL) {
945 tex->key.flags |= SVGA3D_SURFACE_HINT_DEPTHSTENCIL;
946 tex->key.flags |= SVGA3D_SURFACE_BIND_DEPTH_STENCIL;
947 }
948
949 tex->key.numMipLevels = template->last_level + 1;
950
951 tex->key.format = svga_translate_format(svgascreen, template->format,
952 bindings);
953 if (tex->key.format == SVGA3D_FORMAT_INVALID) {
954 goto fail;
955 }
956
957 /* The actual allocation is done with a typeless format. Typeless
958 * formats can be reinterpreted as other formats. For example,
959 * SVGA3D_R8G8B8A8_UNORM_TYPELESS can be interpreted as
960 * SVGA3D_R8G8B8A8_UNORM_SRGB or SVGA3D_R8G8B8A8_UNORM.
961 * Do not use typeless formats for SHARED, DISPLAY_TARGET or SCANOUT
962 * buffers.
963 */
964 if (svgascreen->sws->have_vgpu10
965 && ((bindings & (PIPE_BIND_SHARED |
966 PIPE_BIND_DISPLAY_TARGET |
967 PIPE_BIND_SCANOUT)) == 0)) {
968 SVGA3dSurfaceFormat typeless = svga_typeless_format(tex->key.format);
969 if (0) {
970 debug_printf("Convert resource type %s -> %s (bind 0x%x)\n",
971 svga_format_name(tex->key.format),
972 svga_format_name(typeless),
973 bindings);
974 }
975
976 if (svga_format_is_uncompressed_snorm(tex->key.format)) {
977 /* We can't normally render to snorm surfaces, but once we
978 * substitute a typeless format, we can if the rendertarget view
979 * is unorm. This can happen with GL_ARB_copy_image.
980 */
981 tex->key.flags |= SVGA3D_SURFACE_HINT_RENDERTARGET;
982 tex->key.flags |= SVGA3D_SURFACE_BIND_RENDER_TARGET;
983 }
984
985 tex->key.format = typeless;
986 }
987
988    SVGA_DBG(DEBUG_DMA, "surface_create for texture\n");
989 tex->handle = svga_screen_surface_create(svgascreen, bindings,
990 tex->b.b.usage, &tex->key);
991 if (!tex->handle) {
992 goto fail;
993 }
994
995 SVGA_DBG(DEBUG_DMA, " --> got sid %p (texture)\n", tex->handle);
996
997 debug_reference(&tex->b.b.reference,
998 (debug_reference_descriptor)debug_describe_resource, 0);
999
1000 tex->size = util_resource_size(template);
1001 svgascreen->hud.total_resource_bytes += tex->size;
1002 svgascreen->hud.num_resources++;
1003
1004 SVGA_STATS_TIME_POP(svgascreen->sws);
1005
1006 return &tex->b.b;
1007
1008 fail:
1009 if (tex->dirty)
1010 FREE(tex->dirty);
1011 if (tex->rendered_to)
1012 FREE(tex->rendered_to);
1013 if (tex->defined)
1014 FREE(tex->defined);
1015 FREE(tex);
1016 fail_notex:
1017 SVGA_STATS_TIME_POP(svgascreen->sws);
1018 return NULL;
1019 }
1020
1021
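/**
 * Wrap an imported winsys surface handle in a texture resource.  Only
 * single-level 2D/rect surfaces are accepted, and the format advertised by
 * the handle must be compatible with the template's format.
 */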
1022 struct pipe_resource *
1023 svga_texture_from_handle(struct pipe_screen *screen,
1024 const struct pipe_resource *template,
1025 struct winsys_handle *whandle)
1026 {
1027 struct svga_winsys_screen *sws = svga_winsys_screen(screen);
1028 struct svga_screen *ss = svga_screen(screen);
1029 struct svga_winsys_surface *srf;
1030 struct svga_texture *tex;
1031 enum SVGA3dSurfaceFormat format = 0;
1032 assert(screen);
1033
1034    /* Only single-level 2D and rect textures with depth 1 are supported */
1035 if ((template->target != PIPE_TEXTURE_2D &&
1036 template->target != PIPE_TEXTURE_RECT) ||
1037 template->last_level != 0 ||
1038 template->depth0 != 1) {
1039 return NULL;
1040 }
1041
1042 srf = sws->surface_from_handle(sws, whandle, &format);
1043
1044 if (!srf)
1045 return NULL;
1046
1047 if (svga_translate_format(svga_screen(screen), template->format,
1048 template->bind) != format) {
1049 unsigned f1 = svga_translate_format(svga_screen(screen),
1050 template->format, template->bind);
1051 unsigned f2 = format;
1052
1053       /* It's okay for XRGB and ARGB or depth with/without stencil to get mixed up.
1054        */
1055 if (f1 == SVGA3D_B8G8R8A8_UNORM)
1056 f1 = SVGA3D_A8R8G8B8;
1057 if (f1 == SVGA3D_B8G8R8X8_UNORM)
1058 f1 = SVGA3D_X8R8G8B8;
1059
1060 if ( !( (f1 == f2) ||
1061 (f1 == SVGA3D_X8R8G8B8 && f2 == SVGA3D_A8R8G8B8) ||
1062 (f1 == SVGA3D_X8R8G8B8 && f2 == SVGA3D_B8G8R8X8_UNORM) ||
1063 (f1 == SVGA3D_A8R8G8B8 && f2 == SVGA3D_X8R8G8B8) ||
1064 (f1 == SVGA3D_A8R8G8B8 && f2 == SVGA3D_B8G8R8A8_UNORM) ||
1065 (f1 == SVGA3D_Z_D24X8 && f2 == SVGA3D_Z_D24S8) ||
1066 (f1 == SVGA3D_Z_DF24 && f2 == SVGA3D_Z_D24S8_INT) ) ) {
1067 debug_printf("%s wrong format %s != %s\n", __FUNCTION__,
1068 svga_format_name(f1), svga_format_name(f2));
1069 return NULL;
1070 }
1071 }
1072
1073 tex = CALLOC_STRUCT(svga_texture);
1074 if (!tex)
1075 return NULL;
1076
1077 tex->defined = CALLOC(template->depth0 * template->array_size,
1078 sizeof(tex->defined[0]));
1079 if (!tex->defined) {
1080 FREE(tex);
1081 return NULL;
1082 }
1083
1084 tex->b.b = *template;
1085 tex->b.vtbl = &svga_texture_vtbl;
1086 pipe_reference_init(&tex->b.b.reference, 1);
1087 tex->b.b.screen = screen;
1088
1089 SVGA_DBG(DEBUG_DMA, "wrap surface sid %p\n", srf);
1090
1091 tex->key.cachable = 0;
1092 tex->key.format = format;
1093 tex->handle = srf;
1094
1095 tex->rendered_to = CALLOC(1, sizeof(tex->rendered_to[0]));
1096 if (!tex->rendered_to)
1097 goto fail;
1098
1099 tex->dirty = CALLOC(1, sizeof(tex->dirty[0]));
1100 if (!tex->dirty)
1101 goto fail;
1102
1103 tex->imported = TRUE;
1104
1105 ss->hud.num_resources++;
1106
1107 return &tex->b.b;
1108
1109 fail:
1110 if (tex->defined)
1111 FREE(tex->defined);
1112 if (tex->rendered_to)
1113 FREE(tex->rendered_to);
1114 if (tex->dirty)
1115 FREE(tex->dirty);
1116 FREE(tex);
1117 return NULL;
1118 }
1119
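/**
 * Generate mipmap levels of a texture with the VGPU10 GenMips command.
 * Returns FALSE if the hardware path cannot be used (non-2D target,
 * unsupported format, or a surface not bound as a render target) so the
 * caller can fall back to the software mipmap utility.
 */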
1120 boolean
1121 svga_texture_generate_mipmap(struct pipe_context *pipe,
1122 struct pipe_resource *pt,
1123 enum pipe_format format,
1124 unsigned base_level,
1125 unsigned last_level,
1126 unsigned first_layer,
1127 unsigned last_layer)
1128 {
1129 struct pipe_sampler_view templ, *psv;
1130 struct svga_pipe_sampler_view *sv;
1131 struct svga_context *svga = svga_context(pipe);
1132 struct svga_texture *tex = svga_texture(pt);
1133 enum pipe_error ret;
1134
1135 assert(svga_have_vgpu10(svga));
1136
1137    /* Only 2D textures are supported for now */
1138 if (pt->target != PIPE_TEXTURE_2D)
1139 return FALSE;
1140
1141    /* Fall back to the mipmap generation utility for formats that
1142     * do not support hw mipmap generation.
1143     */
1144 if (!svga_format_support_gen_mips(format))
1145 return FALSE;
1146
1147 /* Make sure the texture surface was created with
1148 * SVGA3D_SURFACE_BIND_RENDER_TARGET
1149 */
1150 if (!tex->handle || !(tex->key.flags & SVGA3D_SURFACE_BIND_RENDER_TARGET))
1151 return FALSE;
1152
1153 templ.format = format;
1154 templ.u.tex.first_layer = first_layer;
1155 templ.u.tex.last_layer = last_layer;
1156 templ.u.tex.first_level = base_level;
1157 templ.u.tex.last_level = last_level;
1158
1159 psv = pipe->create_sampler_view(pipe, pt, &templ);
1160 if (psv == NULL)
1161 return FALSE;
1162
1163 sv = svga_pipe_sampler_view(psv);
1164 ret = svga_validate_pipe_sampler_view(svga, sv);
1165 if (ret != PIPE_OK) {
1166 svga_context_flush(svga, NULL);
1167 ret = svga_validate_pipe_sampler_view(svga, sv);
1168 assert(ret == PIPE_OK);
1169 }
1170
1171 ret = SVGA3D_vgpu10_GenMips(svga->swc, sv->id, tex->handle);
1172 if (ret != PIPE_OK) {
1173 svga_context_flush(svga, NULL);
1174 ret = SVGA3D_vgpu10_GenMips(svga->swc, sv->id, tex->handle);
1175 }
1176 pipe_sampler_view_reference(&psv, NULL);
1177
1178 svga->hud.num_generate_mipmap++;
1179
1180 return TRUE;
1181 }